diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 0fe9c72df..0e248a95e 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -10,8 +10,8 @@ Resolves #
 **Checklist:**
 
 - [ ] **Commit Message Formatting**: Commit titles and messages follow guidelines in the [developer guide](https://rook.io/docs/rook/master/development-flow.html#commit-structure).
-- [ ] **Skip Tests for Docs**: Add the flag for skipping the build if this is only a documentation change. See [here](https://github.com/rook/rook/blob/master/INSTALL.md#skip-ci) for the flag.
-- [ ] **Skip Unrelated Tests**: Add a flag to run tests for a specific storage provider. See [test options](https://github.com/rook/rook/blob/master/INSTALL.md#test-storage-provider).
+- [ ] **Skip Tests for Docs**: Add the flag for skipping the build if this is only a documentation change. See [here](https://github.com/rook/cassandra/blob/master/INSTALL.md#skip-ci) for the flag.
+- [ ] **Skip Unrelated Tests**: Add a flag to run tests for a specific storage provider. See [test options](https://github.com/rook/cassandra/blob/master/INSTALL.md#test-storage-provider).
 - [ ] Reviewed the developer guide on [Submitting a Pull Request](https://rook.io/docs/rook/master/development-flow.html#submitting-a-pull-request)
 - [ ] Documentation has been updated, if necessary.
 - [ ] Unit tests have been added, if necessary.
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 365a88b67..235f7c4be 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -8,75 +8,33 @@ defaults:
     shell: bash --noprofile --norc -eo pipefail -x {0}
 
 jobs:
-  macos-build:
-    runs-on: macos-10.15
-    steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - uses: actions/setup-go@v2
-        with:
-          go-version: 1.16
-
-      - name: copy working directory to GOPATH
-        run: sudo mkdir -p /Users/runner/go/src/github.com && sudo cp -a /Users/runner/work/rook /Users/runner/go/src/github.com/
-
-      - name: build rook
-        working-directory: /Users/runner/go/src/github.com/rook/rook
-        run: |
-          GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' BUILD_CONTAINER_IMAGE=false build
-
-      - name: run codegen
-        working-directory: /Users/runner/go/src/github.com/rook/rook
-        run: GOPATH=$(go env GOPATH) make codegen
-
-      - name: validate codegen
-        working-directory: /Users/runner/go/src/github.com/rook/rook
-        run: tests/scripts/validate_modified_files.sh codegen
-
-      - name: run mod check
-        run: GOPATH=$(go env GOPATH) make -j $(nproc) mod.check
-
-      - name: validate modcheck
-        run: tests/scripts/validate_modified_files.sh modcheck
-
-      - name: run crds-gen
-        working-directory: /Users/runner/go/src/github.com/rook/rook
-        run: make csv-clean && GOPATH=$(go env GOPATH) make crds
-
-      - name: validate crds-gen
-        working-directory: /Users/runner/go/src/github.com/rook/rook
-        run: tests/scripts/validate_modified_files.sh crd
-
-      - name: setup tmate session for debugging
-        if: failure()
-        uses: mxschmitt/action-tmate@v3
-        timeout-minutes: 120
-
   linux-build-all:
     runs-on: ubuntu-18.04
+    strategy:
+      fail-fast: false
+      matrix:
+        go-version : ['1.16', '1.17']
     steps:
       - name: checkout
         uses: actions/checkout@v2
         with:
           fetch-depth: 0
 
-      - name: setup golang
+      - name: setup golang ${{ matrix.go-version }}
        uses: actions/setup-go@v2
        with:
-          go-version: 1.16
+          go-version: ${{ matrix.go-version }}
 
       - name: set up QEMU
         uses: docker/setup-qemu-action@master
         with:
           platforms: all
 
-      - name: build.all rook
-        run: tests/scripts/github-action-helper.sh build_rook_all
+      - name: build.all rook with go ${{ matrix.go-version }}
+        run: |
+          tests/scripts/github-action-helper.sh build_rook_all
 
       - name: setup tmate session for debugging
         if: failure()
         uses: mxschmitt/action-tmate@v3
-        timeout-minutes: 120
+        timeout-minutes: 60
diff --git a/.github/workflows/canary-integration-test-arm64.yml b/.github/workflows/canary-integration-test-arm64.yml
deleted file mode 100644
index d9c79c6cc..000000000
--- a/.github/workflows/canary-integration-test-arm64.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-name: Canary integration tests ARM64
-on:
-  schedule:
-    - cron: '0 0 * * *' # every day at midnight
-
-defaults:
-  run:
-    # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell
-    shell: bash --noprofile --norc -eo pipefail -x {0}
-
-jobs:
-  canary-arm64:
-    runs-on: [self-hosted, ubuntu-20.04, ARM64]
-    if: github.repository == 'rook/rook'
-    env:
-      BLOCK: /dev/sdb
-
-    steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: setup golang
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.16
-
-      - name: teardown minikube and docker
-        run: |
-          uptime
-          minikube delete
-          docker system prune -a
-
-      - name: setup minikube
-        run: |
-          # sudo apt-get install build-essential -y
-          # curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-arm64
-          # sudo install minikube-linux-arm64 /usr/local/bin/minikube
-          # sudo rm -f minikube-linux-arm64
-          # curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"
-          # sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
-          minikube start --memory 28g --cpus=12 --driver=docker
-
-      - name: print k8s cluster status
-        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
-
-      - name: use local disk and create partitions for osds
-        run: |
-          tests/scripts/github-action-helper.sh use_local_disk
-          tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --osd-count 1
-
-      - name: validate-yaml
-        run: tests/scripts/github-action-helper.sh validate_yaml
-
-      - name: deploy cluster
-        run: |
-          # removing liveness probes since the env is slow and the probe is killing the daemons
-          yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mon.disabled" true
-          yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mgr.disabled" true
-          yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.osd.disabled" true
-          tests/scripts/github-action-helper.sh deploy_cluster
-          # there are no package for arm64 nfs-ganesha
-          kubectl delete -f cluster/examples/kubernetes/ceph/nfs-test.yaml
-
-      - name: wait for prepare pod
-        run: timeout 900 sh -c 'until kubectl -n rook-ceph logs -f $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do sleep 5; done' || kubectl -n rook-ceph get all && kubectl logs -n rook-ceph deploy/rook-ceph-operator
-
-      - name: wait for ceph to be ready
-        run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready all 1
-
-      - name: teardown minikube and docker
-        run: |
-          minikube delete
-          docker system prune -a
-
-      - name: upload canary test result
-        uses: actions/upload-artifact@v2
-        if: always()
-        with:
-          name: canary-arm64
-          path: test
diff --git a/.github/workflows/canary-integration-test.yml
b/.github/workflows/canary-integration-test.yml deleted file mode 100644 index 5a3f076e9..000000000 --- a/.github/workflows/canary-integration-test.yml +++ /dev/null @@ -1,879 +0,0 @@ -name: Canary integration tests -on: - push: - tags: - - v* - branches: - - master - - release-* - pull_request: - branches: - - master - - release-* - -defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - canary: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk and create partitions for osds - run: | - tests/scripts/github-action-helper.sh use_local_disk - tests/scripts/github-action-helper.sh create_partitions_for_osds - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: validate-yaml - run: tests/scripts/github-action-helper.sh validate_yaml - - - name: deploy cluster - run: tests/scripts/github-action-helper.sh deploy_cluster - - - name: wait for prepare pod - run: timeout 300 sh -c 'until kubectl -n rook-ceph logs -f $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do sleep 5; done' - - - name: wait for ceph to be ready - run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready all 2 - - - name: test external script create-external-cluster-resources.py - run: | - toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}') - timeout 15 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph mgr dump -f json|jq --raw-output .active_addr|grep -Eosq \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\" ; do sleep 1 && echo 'waiting for the manager IP to be available'; done" - mgr_raw=$(kubectl -n rook-ceph exec $toolbox -- ceph mgr dump -f json|jq --raw-output .active_addr) - timeout 60 sh -c "until kubectl -n rook-ceph exec $toolbox -- curl --silent --show-error ${mgr_raw%%:*}:9283; do echo 'waiting for mgr prometheus exporter to be ready' && sleep 1; done" - kubectl -n rook-ceph exec $toolbox -- /bin/bash -c "echo \"$(kubectl get pods -o wide -n rook-ceph -l app=rook-ceph-mgr --no-headers=true|head -n1|awk '{print $6"\t"$1}')\" >>/etc/hosts" - kubectl -n rook-ceph exec $toolbox -- mkdir -p /etc/ceph/test-data - kubectl -n rook-ceph cp cluster/examples/kubernetes/ceph/test-data/ceph-status-out $toolbox:/etc/ceph/test-data/ - kubectl -n rook-ceph cp cluster/examples/kubernetes/ceph/create-external-cluster-resources.py $toolbox:/etc/ceph - timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool; do echo 'waiting for script to succeed' && sleep 1; done" - - - name: run external script create-external-cluster-resources.py unit tests - run: | - kubectl -n rook-ceph exec 
$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}') -- python3 -m unittest /etc/ceph/create-external-cluster-resources.py - # write a test file - # copy the test file - # execute the test file - - - name: check-ownerreferences - run: tests/scripts/github-action-helper.sh check_ownerreferences - - - name: Upload canary test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: canary - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - pvc: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: install deps - run: tests/scripts/github-action-helper.sh install_deps - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: use local disk and create partitions for osds - run: | - tests/scripts/github-action-helper.sh use_local_disk - tests/scripts/github-action-helper.sh create_partitions_for_osds - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: create cluster prerequisites - run: | - BLOCK=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1) - tests/scripts/localPathPV.sh "$BLOCK" - tests/scripts/github-action-helper.sh create_cluster_prerequisites - - - name: deploy cluster - run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi - kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml - - - name: wait for prepare pod - run: | - timeout 180 sh -c '[ $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'|wc -l) -eq 2 ]; do sleep 5; done'||true - for prepare in $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do - kubectl -n rook-ceph logs -f $prepare - break - done - timeout 60 sh -c 'until kubectl -n rook-ceph logs $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd,ceph_daemon_id=0 -o jsonpath='{.items[*].metadata.name}') --all-containers; do echo "waiting for osd container" && sleep 1; done'||true - kubectl -n rook-ceph describe job/$prepare||true - kubectl -n rook-ceph describe deploy/rook-ceph-osd-0||true - - - name: wait for ceph to be ready - run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 2 - - - name: check-ownerreferences - run: tests/scripts/github-action-helper.sh check_ownerreferences - - - name: Upload pvc test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: pvc - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - 
timeout-minutes: 120 - - pvc-db: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: install deps - run: tests/scripts/github-action-helper.sh install_deps - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk - - - name: create bluestore partitions and PVCs - run: tests/scripts/github-action-helper.sh create_bluestore_partitions_and_pvcs - - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: create cluster prerequisites - run: tests/scripts/github-action-helper.sh create_cluster_prerequisites - - - name: deploy cluster - run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false - cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml - - - name: wait for prepare pod - run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - - - name: wait for ceph to be ready - run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1 - - - name: Upload pvc-db test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: pvc-db - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - pvc-db-wal: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: install deps - run: | - tests/scripts/github-action-helper.sh install_deps - sudo apt-get install -y gdisk - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk - - - name: create bluestore partitions and PVCs for wal - run: tests/scripts/github-action-helper.sh create_bluestore_partitions_and_pvcs_for_wal - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: create cluster prerequisites - run: tests/scripts/github-action-helper.sh create_cluster_prerequisites - - - name: deploy rook - run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false - cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml - cat tests/manifests/test-on-pvc-wal.yaml >> 
tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml - - - name: wait for prepare pod - run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - - - name: wait for ceph to be ready - run: | - tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1 - kubectl -n rook-ceph get pods - - - name: Upload pvc-db-wal test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: pvc-db-wal - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - encryption-pvc: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: install deps - run: tests/scripts/github-action-helper.sh install_deps - - - name: use local disk and create partitions for osds - run: | - tests/scripts/github-action-helper.sh use_local_disk - tests/scripts/github-action-helper.sh create_partitions_for_osds - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: create cluster prerequisites - run: | - tests/scripts/localPathPV.sh $(lsblk --paths|awk '/14G/ {print $1}'| head -1) - tests/scripts/github-action-helper.sh create_cluster_prerequisites - - - name: deploy cluster - run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi - kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml - - - name: wait for prepare pod - run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - - - name: wait for ceph to be ready - run: | - tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 2 - kubectl -n rook-ceph get secrets - sudo lsblk - - - name: Upload encryption-pvc test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: encryption-pvc - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - encryption-pvc-db: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: install deps - run: | - tests/scripts/github-action-helper.sh install_deps - sudo apt-get install -y gdisk - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: use local disk - run: 
tests/scripts/github-action-helper.sh use_local_disk - - - name: create bluestore partitions and PVCs - run: tests/scripts/github-action-helper.sh create_bluestore_partitions_and_pvcs - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: create cluster prerequisites - run: tests/scripts/github-action-helper.sh create_cluster_prerequisites - - - name: deploy cluster - run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml - cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml - - - name: wait for prepare pod - run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - - - name: wait for ceph to be ready - run: | - tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1 - kubectl -n rook-ceph get pods - kubectl -n rook-ceph get secrets - - - name: Upload encryption-pvc-db-wal test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: encryption-pvc-db-wal - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - encryption-pvc-db-wal: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: install deps - run: | - tests/scripts/github-action-helper.sh install_deps - sudo apt-get install -y gdisk - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk - - - name: create bluestore partitions and PVCs for wal - run: tests/scripts/github-action-helper.sh create_bluestore_partitions_and_pvcs_for_wal - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: create cluster prerequisites - run: tests/scripts/github-action-helper.sh create_cluster_prerequisites - - - name: deploy rook - run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml - cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml - cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml - - - name: wait for prepare pod - run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - - - name: wait for ceph to be ready - run: | - tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1 - kubectl -n rook-ceph get pods - kubectl -n rook-ceph get secrets - - - name: Upload encryption-pvc-db test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: encryption-pvc-db - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - - 
encryption-pvc-kms-vault-token-auth: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: install deps - run: tests/scripts/github-action-helper.sh install_deps - - - name: use local disk and create partitions for osds - run: | - tests/scripts/github-action-helper.sh use_local_disk - tests/scripts/github-action-helper.sh create_partitions_for_osds - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: create cluster prerequisites - run: | - tests/scripts/localPathPV.sh $(lsblk --paths|awk '/14G/ {print $1}'| head -1) - tests/scripts/github-action-helper.sh create_cluster_prerequisites - - - name: deploy vault - run: tests/scripts/deploy-validate-vault.sh deploy - - - name: deploy cluster - run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml - cat tests/manifests/test-kms-vault.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml - yq merge --inplace --arrays append tests/manifests/test-cluster-on-pvc-encrypted.yaml tests/manifests/test-kms-vault-spec.yaml - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi - kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - yq merge --inplace --arrays append tests/manifests/test-object.yaml tests/manifests/test-kms-vault-spec.yaml - sed -i 's/ver1/ver2/g' tests/manifests/test-object.yaml - kubectl create -f tests/manifests/test-object.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml - - - name: wait for prepare pod - run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - - - name: wait for ceph to be ready - run: | - tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 2 - tests/scripts/validate_cluster.sh rgw - kubectl -n rook-ceph get pods - kubectl -n rook-ceph get secrets - - - name: validate osd vault - run: | - tests/scripts/deploy-validate-vault.sh validate_osd - sudo lsblk - - - name: validate rgw vault - run: | - tests/scripts/deploy-validate-vault.sh validate_rgw - - - name: Upload encryption-pvc-kms-vault-token-auth test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: encryption-pvc-kms-vault-token-auth - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - lvm-pvc: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: install deps - run: tests/scripts/github-action-helper.sh install_deps - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: 
--memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: create LV on disk - run: tests/scripts/github-action-helper.sh create_LV_on_disk - - - name: deploy cluster - run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml - yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false - kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml - - - name: wait for prepare pod - run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - - - name: wait for ceph to be ready - run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1 - - - name: check-ownerreferences - run: tests/scripts/github-action-helper.sh check_ownerreferences - - - name: Upload pvc test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: lvm-pvc - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - multi-cluster-mirroring: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: install deps - run: tests/scripts/github-action-helper.sh install_deps - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: 'v1.19.2' - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: use local disk into two partitions - run: | - BLOCK=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1) - BLOCK_DATA_PART=${BLOCK}1 - sudo dmsetup version||true - sudo swapoff --all --verbose - sudo umount /mnt - # search for the device since it keeps changing between sda and sdb - sudo wipefs --all --force "$BLOCK_DATA_PART" - tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --osd-count 2 - sudo lsblk - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: deploy first cluster rook - run: | - BLOCK=$(sudo lsblk|awk '/14G/ {print $1}'| head -1) - cd cluster/examples/kubernetes/ceph/ - kubectl create -f crds.yaml -f common.yaml -f operator.yaml - yq w -i -d1 cluster-test.yaml spec.dashboard.enabled false - yq w -i -d1 cluster-test.yaml spec.storage.useAllDevices false - yq w -i -d1 cluster-test.yaml spec.storage.deviceFilter ${BLOCK}1 - kubectl create -f cluster-test.yaml -f rbdmirror.yaml -f filesystem-mirror.yaml -f toolbox.yaml - - # cephfs-mirroring is a push operation - # running bootstrap create on secondary and bootstrap import on primary. mirror daemons on primary. 
- - name: deploy second cluster rook - run: | - BLOCK=$(sudo lsblk|awk '/14G/ {print $1}'| head -1) - cd cluster/examples/kubernetes/ceph/ - NAMESPACE=rook-ceph-secondary envsubst < common-second-cluster.yaml | kubectl create -f - - sed -i 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g' cluster-test.yaml rbdmirror.yaml - yq w -i -d1 cluster-test.yaml spec.storage.deviceFilter ${BLOCK}2 - yq w -i -d1 cluster-test.yaml spec.dataDirHostPath "/var/lib/rook-secondary" - yq w -i toolbox.yaml metadata.namespace rook-ceph-secondary - kubectl create -f cluster-test.yaml -f rbdmirror.yaml -f toolbox.yaml - - - name: wait for ceph cluster 1 to be ready - run: | - mkdir test - tests/scripts/validate_cluster.sh osd 1 - kubectl -n rook-ceph get pods - - - name: create replicated mirrored pool on cluster 1 - run: | - cd cluster/examples/kubernetes/ceph/ - yq w -i pool-test.yaml spec.mirroring.enabled true - yq w -i pool-test.yaml spec.mirroring.mode image - kubectl create -f pool-test.yaml - timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to created on cluster 1" && sleep 1; done' - - - name: create replicated mirrored pool 2 on cluster 1 - run: | - cd cluster/examples/kubernetes/ceph/ - yq w -i pool-test.yaml metadata.name replicapool2 - kubectl create -f pool-test.yaml - timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool2 -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to created on cluster 2" && sleep 1; done' - yq w -i pool-test.yaml metadata.name replicapool - - - name: create replicated mirrored pool on cluster 2 - run: | - cd cluster/examples/kubernetes/ceph/ - yq w -i pool-test.yaml metadata.namespace rook-ceph-secondary - kubectl create -f pool-test.yaml - timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to created on cluster 1" && sleep 1; done' - - - name: create replicated mirrored pool 2 on cluster 2 - run: | - cd cluster/examples/kubernetes/ceph/ - yq w -i pool-test.yaml metadata.name replicapool2 - kubectl create -f pool-test.yaml - timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to created on cluster 2" && sleep 1; done' - - - name: create images in the pools - run: | - kubectl exec -n rook-ceph deploy/rook-ceph-tools -ti -- rbd -p replicapool create test -s 1G - kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd mirror image enable replicapool/test snapshot - kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool info test - kubectl exec -n rook-ceph deploy/rook-ceph-tools -ti -- rbd -p replicapool2 create test -s 1G - kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd mirror image enable replicapool2/test snapshot - kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool2 info test - - - name: copy block mirror peer secret into the other cluster for replicapool - run: | - kubectl -n rook-ceph get secret pool-peer-token-replicapool -o yaml |\ - sed 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g; s/name: pool-peer-token-replicapool/name: pool-peer-token-replicapool-config/g' |\ - kubectl create --namespace=rook-ceph-secondary -f - - - - name: copy 
block mirror peer secret into the other cluster for replicapool2 (using cluster global peer) - run: | - kubectl -n rook-ceph get secret cluster-peer-token-my-cluster -o yaml |\ - sed 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g; s/name: cluster-peer-token-my-cluster/name: cluster-peer-token-my-cluster-config/g' |\ - kubectl create --namespace=rook-ceph-secondary -f - - - - name: add block mirror peer secret to the other cluster for replicapool - run: | - kubectl -n rook-ceph-secondary patch cephblockpool replicapool --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["pool-peer-token-replicapool-config"]}}}}' - - - name: add block mirror peer secret to the other cluster for replicapool2 (using cluster global peer) - run: | - kubectl -n rook-ceph-secondary patch cephblockpool replicapool2 --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["cluster-peer-token-my-cluster-config"]}}}}' - - - name: verify image has been mirrored for replicapool - run: | - # let's wait a bit for the image to be present - timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored in pool replicapool" && sleep 1; done' - - - name: verify image has been mirrored for replicapool2 - run: | - # let's wait a bit for the image to be present - timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool2 ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored in pool replicapool2" && sleep 1; done' - - - name: display cephblockpool and image status - run: | - timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated in replicapool" && sleep 1; done' - timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated in replicapool2" && sleep 1; done' - kubectl -n rook-ceph-secondary get cephblockpool replicapool -o yaml - kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o yaml - kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool info test - kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool2 info test - - - name: create replicated mirrored filesystem on cluster 1 - run: | - PRIMARY_YAML=cluster/examples/kubernetes/ceph/filesystem-test-primary.yaml - cp cluster/examples/kubernetes/ceph/filesystem-test.yaml "$PRIMARY_YAML" - yq merge --inplace --arrays append "$PRIMARY_YAML" tests/manifests/test-fs-mirror-spec.yaml - kubectl create -f "$PRIMARY_YAML" - timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephfilesystem myfs -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for filesystem to be created" && sleep 1; done' - - - name: create replicated mirrored filesystem on cluster 2 - run: | - cd cluster/examples/kubernetes/ceph/ - yq w -i filesystem-test.yaml metadata.namespace rook-ceph-secondary - yq w -i filesystem-test.yaml spec.mirroring.enabled true - kubectl create -f filesystem-test.yaml - timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephfilesystem myfs -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for filesystem to be created" && sleep 
1; done' - - - name: copy filesystem mirror peer secret from the secondary cluster to the primary one - run: | - kubectl -n rook-ceph-secondary get secret fs-peer-token-myfs -o yaml |\ - sed '/ownerReferences/,+6d' |\ - sed 's/namespace: rook-ceph-secondary/namespace: rook-ceph/g; s/name: fs-peer-token-myfs/name: fs-peer-token-myfs-config/g' |\ - kubectl create --namespace=rook-ceph -f - - - - name: add filesystem mirror peer secret to the primary cluster - run: | - kubectl -n rook-ceph patch cephfilesystem myfs --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["fs-peer-token-myfs-config"]}}}}' - - - name: verify fs mirroring is working - run: | - timeout 45 sh -c 'until [ "$(kubectl -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ls -1 /var/run/ceph/|grep -c asok)" -eq 3 ]; do echo "waiting for connection to peer" && sleep 1; done' - sockets=$(kubectl -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ls -1 /var/run/ceph/) - status=$(for socket in $sockets; do minikube kubectl -- -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ceph --admin-daemon /var/run/ceph/$socket help|awk -F ":" '/get filesystem mirror status/ {print $1}'; done) - if [ "${#status}" -lt 1 ]; then echo "peer addition failed" && exit 1; fi - - - name: display cephfilesystem and fs mirror daemon status - run: | - kubectl -n rook-ceph get cephfilesystem myfs -o yaml - # the check is not super ideal since 'mirroring_failed' is only displayed when there is a failure but not when it's working... - timeout 60 sh -c 'while [ "$(kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- ceph fs snapshot mirror daemon status myfs|jq -r '.[0].filesystems[0]'|grep -c "mirroring_failed")" -eq 1 ]; do echo "waiting for filesystem to be mirrored" && sleep 1; done' - - - name: upload test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: multi-cluster-mirroring - path: test - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/codegen.yml b/.github/workflows/codegen.yml index 81ba0ec66..72108125f 100644 --- a/.github/workflows/codegen.yml +++ b/.github/workflows/codegen.yml @@ -30,12 +30,12 @@ jobs: fetch-depth: 0 - name: copy working directory to GOPATH - run: sudo mkdir -p /home/runner/go/src/github.com && sudo cp -a /home/runner/work/rook /home/runner/go/src/github.com/ + run: sudo mkdir -p /home/runner/go/src/github.com/rook && sudo cp -a /home/runner/work/cassandra/cassandra /home/runner/go/src/github.com/rook/. - name: run codegen - working-directory: /home/runner/go/src/github.com/rook/rook + working-directory: /home/runner/go/src/github.com/rook/cassandra run: GOPATH=$(go env GOPATH) make codegen - name: validate codegen - working-directory: /home/runner/go/src/github.com/rook/rook + working-directory: /home/runner/go/src/github.com/rook/cassandra run: tests/scripts/validate_modified_files.sh codegen diff --git a/.github/workflows/crds-gen.yml b/.github/workflows/crds-gen.yml index ad3bffe4c..d466b732a 100644 --- a/.github/workflows/crds-gen.yml +++ b/.github/workflows/crds-gen.yml @@ -30,12 +30,12 @@ jobs: fetch-depth: 0 - name: copy working directory to GOPATH - run: sudo mkdir -p /home/runner/go/src/github.com && sudo cp -a /home/runner/work/rook /home/runner/go/src/github.com/ + run: sudo mkdir -p /home/runner/go/src/github.com/rook && sudo cp -a /home/runner/work/cassandra/cassandra /home/runner/go/src/github.com/rook/. 
 
       - name: run crds-gen
-        working-directory: /home/runner/go/src/github.com/rook/rook
+        working-directory: /home/runner/go/src/github.com/rook/cassandra
         run: GOPATH=$(go env GOPATH) make crds
 
       - name: validate crds-gen
-        working-directory: /home/runner/go/src/github.com/rook/rook
+        working-directory: /home/runner/go/src/github.com/rook/cassandra
         run: tests/scripts/validate_modified_files.sh crd
diff --git a/.github/workflows/create-tag.yaml b/.github/workflows/create-tag.yaml
new file mode 100644
index 000000000..5d4ebcb15
--- /dev/null
+++ b/.github/workflows/create-tag.yaml
@@ -0,0 +1,42 @@
+name: Tag
+on:
+  workflow_dispatch:
+    inputs:
+      version:
+        description: 'Release version (e.g. v1.7.0)'
+        required: true
+
+defaults:
+  run:
+    # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell
+    shell: bash --noprofile --norc -eo pipefail -x {0}
+
+jobs:
+  Create-Tag:
+    runs-on: ubuntu-18.04
+    if: github.repository == 'rook/cassandra' && contains('travisn,leseb,BlaineEXE,jbw976,galexrt,satoru-takeuchi', github.actor)
+    steps:
+      - name: checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: set env
+        run: |
+          echo "FROM_BRANCH=${GITHUB_REF##*/}" >> $GITHUB_ENV
+          echo "TO_TAG=$(git describe --abbrev=0 --tags)" >> $GITHUB_ENV
+          echo "GITHUB_USER=rook" >> $GITHUB_ENV
+
+      - name: Create Tag
+        uses: negz/create-tag@v1
+        with:
+          version: ${{ github.event.inputs.version }}
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Get Release Note
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GITHUB_USER: ${{ env.GITHUB_USER }}
+          FROM_BRANCH: ${{ env.FROM_BRANCH }}
+          TO_TAG: ${{ env.TO_TAG }}
+        run: tests/scripts/gen_release_notes.sh
diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml
deleted file mode 100644
index 7ac4c8ab5..000000000
--- a/.github/workflows/helm-lint.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# Reference https://github.com/helm/chart-testing-action/tree/master#example-workflow
-name: Lint Charts
-
-on:
-  push:
-    tags:
-      - v*
-    branches:
-      - master
-      - release-*
-  pull_request:
-    branches:
-      - master
-      - release-*
-
-jobs:
-  lint-test:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Set up Helm
-        uses: azure/setup-helm@v1
-        with:
-          version: v3.6.2
-
-      - uses: actions/setup-python@v2
-        with:
-          python-version: 3.9
-
-      - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.1.0
-
-      - name: Run chart-testing (lint)
-        run: ct lint --charts=./cluster/charts/rook-ceph --validate-yaml=false --validate-maintainers=false
diff --git a/.github/workflows/integration-test-cassandra-suite.yaml b/.github/workflows/integration-test-cassandra-suite.yaml
index 956daf6d0..46726a6b5 100644
--- a/.github/workflows/integration-test-cassandra-suite.yaml
+++ b/.github/workflows/integration-test-cassandra-suite.yaml
@@ -12,12 +12,11 @@ defaults:
 
 jobs:
   TestCassandraSuite:
-    if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && contains(github.event.pull_request.labels.*.name, 'cassandra')}}
     runs-on: ubuntu-18.04
     strategy:
       fail-fast: false
       matrix:
-        kubernetes-versions : ['v1.16.15', 'v1.21.0']
+        kubernetes-versions : ['v1.17.17', 'v1.22.0']
     steps:
       - name: checkout
         uses: actions/checkout@v2
@@ -32,7 +31,7 @@ jobs:
       - name: setup minikube
         uses: manusa/actions-setup-minikube@v2.4.2
         with:
-          minikube version: 'v1.21.0'
+          minikube version: 'v1.22.0'
           kubernetes version: ${{ matrix.kubernetes-versions }}
           start args: --memory 6g --cpus=2
           github token: ${{ secrets.GITHUB_TOKEN }}
 
@@ -48,16 +47,16 @@ jobs:
 
       - name: TestCassandraSuite
         run: |
-          go test -v -timeout 1800s -run CassandraSuite github.com/rook/rook/tests/integration
+          go test -v -timeout 1800s -run CassandraSuite github.com/rook/cassandra/tests/integration
 
       - name: Artifact
         uses: actions/upload-artifact@v2
         if: failure()
         with:
           name: cassandra-suite-artifact
-          path: /home/runner/work/rook/rook/tests/integration/_output/tests/
+          path: /home/runner/work/cassandra/cassandra/tests/integration/_output/tests/
 
       - name: setup tmate session for debugging
         if: failure()
         uses: mxschmitt/action-tmate@v3
-        timeout-minutes: 120
+        timeout-minutes: 60
diff --git a/.github/workflows/integration-test-flex-suite.yaml b/.github/workflows/integration-test-flex-suite.yaml
deleted file mode 100644
index 9e518a5d0..000000000
--- a/.github/workflows/integration-test-flex-suite.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: Integration test CephFlexSuite
-on:
-  pull_request:
-    branches:
-      - master
-      - release-*
-
-defaults:
-  run:
-    # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell
-    shell: bash --noprofile --norc -eo pipefail -x {0}
-
-jobs:
-  TestCephFlexSuite:
-    if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && !contains(github.event.pull_request.labels.*.name, 'skip-ci') }}
-    runs-on: ubuntu-18.04
-    steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: setup golang
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.16
-
-      - name: setup minikube
-        uses: manusa/actions-setup-minikube@v2.4.2
-        with:
-          minikube version: 'v1.21.0'
-          kubernetes version: 'v1.15.12'
-          start args: --memory 6g --cpus=2
-          github token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: print k8s cluster status
-        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
-
-      - name: use local disk
-        run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test
-
-      - name: build rook
-        run: tests/scripts/github-action-helper.sh build_rook
-
-      - name: TestCephFlexSuite
-        run: |
-          export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1)
-          go test -v -timeout 1800s -run CephFlexSuite github.com/rook/rook/tests/integration
-
-      - name: Artifact
-        uses: actions/upload-artifact@v2
-        if: failure()
-        with:
-          name: ceph-flex-suite-artifact
-          path: /home/runner/work/rook/rook/tests/integration/_output/tests/
-
-      - name: setup tmate session for debugging
-        if: failure()
-        uses: mxschmitt/action-tmate@v3
-        timeout-minutes: 120
diff --git a/.github/workflows/integration-test-helm-suite.yaml b/.github/workflows/integration-test-helm-suite.yaml
deleted file mode 100644
index ea7977601..000000000
--- a/.github/workflows/integration-test-helm-suite.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-name: Integration test CephHelmSuite
-on:
-  pull_request:
-    branches:
-      - master
-      - release-*
-
-defaults:
-  run:
-    # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell
-    shell: bash --noprofile --norc -eo pipefail -x {0}
-
-jobs:
-  TestCephHelmSuite:
-    if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && !contains(github.event.pull_request.labels.*.name, 'skip-ci') }}
-    runs-on: ubuntu-18.04
-    strategy:
-      fail-fast: false
-      matrix:
-        kubernetes-versions : ['v1.15.12', 'v1.21.0']
-    steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: setup golang
-        uses:
actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.3.1 - with: - minikube version: 'v1.18.1' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: remove read permission from kube config file - run: sudo chmod go-r ~/.kube/config - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephHelmSuite - run: | - tests/scripts/minikube.sh helm - tests/scripts/helm.sh up - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_TEST_CLEANUP=false SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephHelmSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-helm-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/integration-test-mgr-suite.yaml b/.github/workflows/integration-test-mgr-suite.yaml deleted file mode 100644 index 6deb56324..000000000 --- a/.github/workflows/integration-test-mgr-suite.yaml +++ /dev/null @@ -1,61 +0,0 @@ -name: Integration test CephMgrSuite -on: - pull_request: - -defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - TestCephMgrSuite: - runs-on: ubuntu-18.04 - if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephMgrSuite - run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephMgrSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-mgr-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/integration-test-multi-cluster-suite.yaml b/.github/workflows/integration-test-multi-cluster-suite.yaml deleted file mode 100644 index 5040f44ca..000000000 --- a/.github/workflows/integration-test-multi-cluster-suite.yaml +++ /dev/null @@ -1,65 +0,0 @@ -name: Integration test CephMultiClusterDeploySuite -on: - pull_request: - branches: - - master - - 
release-* - -defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - TestCephMultiClusterDeploySuite: - if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && !contains(github.event.pull_request.labels.*.name, 'skip-ci') }} - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephMultiClusterDeploySuite - run: | - export TEST_SCRATCH_DEVICE=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1)1 - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephMultiClusterDeploySuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-multi-cluster-deploy-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/integration-test-nfs-suite.yaml b/.github/workflows/integration-test-nfs-suite.yaml deleted file mode 100644 index c7411c513..000000000 --- a/.github/workflows/integration-test-nfs-suite.yaml +++ /dev/null @@ -1,67 +0,0 @@ -name: Integration test NFSSuite -on: - pull_request: - branches: - - master - - release-* - -defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - TestNfsSuite: - if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && contains(github.event.pull_request.labels.*.name, 'nfs')}} - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.16.15', 'v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: | - GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='nfs' build - docker images - docker tag $(docker images|awk '/build-/ {print $1}') rook/nfs:v1.7.2 - - - name: install nfs-common - run: | - sudo apt-get update - sudo apt-get install nfs-common - - - name: TestNFSSuite - run: go test -v -timeout 1800s -run NfsSuite github.com/rook/rook/tests/integration - - - 
name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: nfs-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/integration-test-smoke-suite.yaml b/.github/workflows/integration-test-smoke-suite.yaml deleted file mode 100644 index e57dcfb88..000000000 --- a/.github/workflows/integration-test-smoke-suite.yaml +++ /dev/null @@ -1,64 +0,0 @@ -name: Integration test CephSmokeSuite -on: - pull_request: - branches: - - master - - release-* - -defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - TestCephSmokeSuite: - if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && !contains(github.event.pull_request.labels.*.name, 'skip-ci') }} - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.15.12', 'v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephSmokeSuite - run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-smoke-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/integration-test-upgrade-suite.yaml b/.github/workflows/integration-test-upgrade-suite.yaml deleted file mode 100644 index 3aec1367e..000000000 --- a/.github/workflows/integration-test-upgrade-suite.yaml +++ /dev/null @@ -1,64 +0,0 @@ -name: Integration test CephUpgradeSuite -on: - pull_request: - branches: - - master - - release-* - -defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - TestCephUpgradeSuite: - if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && !contains(github.event.pull_request.labels.*.name, 'skip-ci') }} - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.15.12','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 
6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephUpgradeSuite - run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephUpgradeSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-upgrade-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/integration-tests-on-release.yaml b/.github/workflows/integration-tests-on-release.yaml index 2fb5aed24..405d73c4b 100644 --- a/.github/workflows/integration-tests-on-release.yaml +++ b/.github/workflows/integration-tests-on-release.yaml @@ -13,273 +13,12 @@ defaults: shell: bash --noprofile --norc -eo pipefail -x {0} jobs: - TestCephFlexSuite: - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.15.12','v1.18.15','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephFlexSuite - run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephFlexSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-flex-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - TestCephHelmSuite: - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.15.12','v1.18.15','v1.20.5','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.3.1 - with: - minikube version: 'v1.18.1' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: remove read permission from kube config file - run: sudo chmod go-r ~/.kube/config - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephHelmSuite - run: | - 
tests/scripts/minikube.sh helm - tests/scripts/helm.sh up - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_TEST_CLEANUP=false SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephHelmSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-helm-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - TestCephMultiClusterDeploySuite: - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.15.12','v1.18.15','v1.20.5','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephMultiClusterDeploySuite - run: | - export TEST_SCRATCH_DEVICE=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1)1 - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephMultiClusterDeploySuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-multi-cluster-deploy-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - TestCephSmokeSuite: - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.15.12','v1.18.15','v1.20.5','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephSmokeSuite - run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-smoke-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - TestCephUpgradeSuite: - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : 
['v1.15.12','v1.18.15','v1.20.5','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk - run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test - - - name: build rook - run: tests/scripts/github-action-helper.sh build_rook - - - name: TestCephUpgradeSuite - run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephUpgradeSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-upgrade-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - TestCassandraSuite: runs-on: ubuntu-18.04 strategy: fail-fast: false matrix: - kubernetes-versions : ['v1.16.15','v1.18.15','v1.20.5','v1.21.0'] + kubernetes-versions : ['v1.16.15','v1.18.15','v1.20.5','v1.22.0'] steps: - name: checkout uses: actions/checkout@v2 @@ -294,7 +33,7 @@ jobs: - name: setup minikube uses: manusa/actions-setup-minikube@v2.4.2 with: - minikube version: 'v1.21.0' + minikube version: 'v1.22.0' kubernetes version: ${{ matrix.kubernetes-versions }} start args: --memory 6g --cpus=2 github token: ${{ secrets.GITHUB_TOKEN }} @@ -311,70 +50,16 @@ jobs: - name: TestCassandraSuite run: | export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CassandraSuite github.com/rook/rook/tests/integration + SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CassandraSuite github.com/rook/cassandra/tests/integration - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: name: cassandra-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - TestNFSSuite: - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.16.15','v1.18.15','v1.20.5','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: check k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: | - GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='nfs' build - docker images - docker tag $(docker images|awk '/build-/ {print $1}') rook/nfs:v1.7.2 - - - name: install nfs-common - run: | - sudo apt-get update - sudo apt-get install nfs-common - - - name: TestNFSSuite - run: go test -v -timeout 1800s -run NfsSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: 
actions/upload-artifact@v2 - if: failure() - with: - name: nfs-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ + path: /home/runner/work/rook/cassandra/tests/integration/_output/tests/ - name: setup tmate session for debugging if: failure() uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 + timeout-minutes: 30 diff --git a/.github/workflows/push-build.yaml b/.github/workflows/push-build.yaml index 089eaec37..4a3de2f7a 100644 --- a/.github/workflows/push-build.yaml +++ b/.github/workflows/push-build.yaml @@ -15,7 +15,7 @@ defaults: jobs: push-image-to-container-registry: runs-on: ubuntu-18.04 - if: github.repository == 'rook/rook' + if: github.repository == 'rook/cassandra' steps: - name: checkout uses: actions/checkout@v2 diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml new file mode 100644 index 000000000..8e188f70a --- /dev/null +++ b/.github/workflows/shellcheck.yaml @@ -0,0 +1,28 @@ +name: ShellCheck +on: + push: + tags: + - v* + branches: + - master + - release-* + pull_request: + branches: + - master + - release-* + +jobs: + shellcheck: + name: Shellcheck + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Run ShellCheck + uses: ludeeus/action-shellcheck@master + with: + severity: warning + check_together: 'yes' + disable_matcher: false + additional_files: build/run build/reset build/sed-in-place + ignore: olm + format: gcc diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 912b5cad7..81d4356eb 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -7,7 +7,7 @@ on: jobs: stale: runs-on: ubuntu-18.04 - if: github.repository == 'rook/rook' + if: github.repository == 'rook/cassandra' steps: - uses: actions/stale@v3 with: diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 59e1ba87c..251a8f469 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -30,8 +30,8 @@ jobs: go-version: 1.16 - name: copy working directory to GOPATH - run: sudo mkdir -p /home/runner/go/src/github.com && sudo cp -a /home/runner/work/rook /home/runner/go/src/github.com/ + run: sudo mkdir -p /home/runner/go/src/github.com/rook && sudo cp -a /home/runner/work/cassandra/cassandra /home/runner/go/src/github.com/rook/. 
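The copy-to-GOPATH step above and the unit test step that follows can be approximated locally before pushing a branch. The sketch below is an illustrative addition, not part of the workflow file: it assumes the rook/cassandra repository is checked out in the current directory and simply mirrors what the job does, copying the tree into a GOPATH-style layout and running `make test` from there.

```console
# Illustrative local approximation of the CI steps above.
# Assumes the repository is checked out in the current directory;
# the target path mirrors the GOPATH layout used by the job.
GOPATH="$(go env GOPATH)"
mkdir -p "${GOPATH}/src/github.com/rook"
cp -a . "${GOPATH}/src/github.com/rook/cassandra"
cd "${GOPATH}/src/github.com/rook/cassandra"
GOPATH="${GOPATH}" make -j "$(nproc)" test
```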
- name: run unit tests - working-directory: /home/runner/go/src/github.com/rook/rook + working-directory: /home/runner/go/src/github.com/rook/cassandra run: GOPATH=$(go env GOPATH) make -j $(nproc) test diff --git a/.mergify.yml b/.mergify.yml index 2d8d1fa09..2c9b9734f 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -16,6 +16,14 @@ pull_request_rules: label: add: - cassandra + - name: auto nfs label pr storage backend + conditions: + - title~=^nfs + - base=master + actions: + label: + add: + - nfs # if there is a conflict in a backport PR, ping the author to send a proper backport PR - name: ping author on conflicts @@ -77,21 +85,6 @@ pull_request_rules: dismiss_reviews: {} delete_head_branch: {} - # automerge backports if CI successfully ran - # release-1.3 branch - - name: automerge backport release-1.3 - conditions: - - author=mergify[bot] - - base=release-1.3 - - label!=do-not-merge - - 'status-success=DCO' - - 'status-success=continuous-integration/jenkins/pr-head' - actions: - merge: - method: merge - strict: false - dismiss_reviews: {} - delete_head_branch: {} # release-1.4 branch - name: automerge backport release-1.4 conditions: @@ -106,6 +99,7 @@ pull_request_rules: strict: false dismiss_reviews: {} delete_head_branch: {} + # release-1.5 branch - name: automerge backport release-1.5 conditions: @@ -120,6 +114,7 @@ pull_request_rules: strict: false dismiss_reviews: {} delete_head_branch: {} + # release-1.6 branch - name: automerge backport release-1.6 conditions: @@ -153,15 +148,38 @@ pull_request_rules: dismiss_reviews: {} delete_head_branch: {} - # Trigger backport PRs based on label - # release-1.3 branch - - actions: - backport: - branches: - - release-1.3 + # release-1.7 branch + - name: automerge backport release-1.7 conditions: - - label=backport-release-1.3 - name: backport release-1.3 + - author=mergify[bot] + - base=release-1.7 + - label!=do-not-merge + - 'status-success=DCO' + - 'check-success=canary' + - 'check-success=unittests' + - 'check-success=golangci-lint' + - 'check-success=codegen' + - 'check-success=lint' + - 'check-success=modcheck' + - 'check-success=pvc' + - 'check-success=pvc-db' + - 'check-success=pvc-db-wal' + - 'check-success=encryption-pvc' + - 'check-success=encryption-pvc-db' + - 'check-success=encryption-pvc-db-wal' + - 'check-success=encryption-pvc-kms-vault-token-auth' + - 'check-success=TestCephSmokeSuite (v1.15.12)' + - 'check-success=TestCephSmokeSuite (v1.21.0)' + - 'check-success=TestCephHelmSuite (v1.15.12)' + - 'check-success=TestCephHelmSuite (v1.21.0)' + - 'check-success=TestCephMultiClusterDeploySuite (v1.21.0)' + - 'check-success=TestCephUpgradeSuite (v1.21.0)' + actions: + merge: + method: merge + strict: false + dismiss_reviews: {} + delete_head_branch: {} # release-1.4 branch - actions: @@ -189,3 +207,12 @@ pull_request_rules: conditions: - label=backport-release-1.6 name: backport release-1.6 + + # release-1.7 branch + - actions: + backport: + branches: + - release-1.7 + conditions: + - label=backport-release-1.7 + name: backport release-1.7 diff --git a/ADOPTERS.md b/ADOPTERS.md deleted file mode 100644 index bec8c9ea7..000000000 --- a/ADOPTERS.md +++ /dev/null @@ -1,65 +0,0 @@ -# Rook Adopters - -Below is a list of adopters of Rook in **production environments** that have publicly shared the -details of their usage as well as the benefits provided by Rook that their business relies on. 
There -are additional adopters of Rook, especially those with on-premises deployments, that are not ready -to share the details of their usage publicly at this time. - -* [Calit2 (California Institute for Telecommunications and Information - Technology)](http://www.calit2.net/) is one of 4 institutes formed by a joint partnership of - University of California and the state of California with the goal of *“inventing the university - research environment of the future”*. They operate one of the largest known Rook clusters in - production and they are using Rook to provide cheap, reliable, and fast storage to scientific - users. -* [NAV (Norwegian Labour and Welfare Administration)](https://www.nav.no/) is the current Norwegian - public welfare agency, responsible for 1/3 of the state budget of Norway. They find a massive - simplification of management and maintenance for their Ceph clusters by adopting Rook. -* [Replicated](https://www.replicated.com/) delivers *“SaaS On-Prem”* and are the creators of - open-source [kURL](https://kurl.sh/): a custom Kubernetes distro creator that software vendors use - to package and distribute production-grade Kubernetes infrastructure. Rook is a default add-on in - kURL, so all installations include Rook to manage highly available storage that the software - vendor can depend on. -* [Discogs](https://www.discogs.com/) is building the largest and most comprehensive music database - and marketplace in the world and services millions of users all across the globe. Rook enables - them to save both time and money in the long term and allows their IT operations team to function - with fewer dedicated staff. -* [Finleap Connect](https://connect.finleap.com/) offers a full range of leading fintech solutions - to financial institutions across Europe. Rook has been running flawlessly for them across many - versions and upgrades, and delivers the performance and resilience they require for their most - critical business applications. -* [Centre of Excellence in Next Generation Networks (CENGN)](https://www.cengn.ca) is on a mission - to accelerate the growth of the Canadian Information and Communications Technology (ICT) sector. - The Rook Ceph operator is key to the Kubernetes clusters frequently set up for projects by - small-medium enterprises in the CENGN labs. -* [Avisi](https://www.avisi.nl/) develops and operates software for organizations like the the Dutch - Notary Association. They have survived multiple disaster scenarios already with Rook and it has - made their cloud native journey in the private cloud so much easier by providing a powerful tool - that lets them take advantage of a mature storage product with ease and peace of mind. -* [Geodata](https://geodata.no/) Provides geospatial services and Geographical Information Systems - (GIS). The latest versions of Rook have amazed them, especially compared to many of the other - storage options they have attempted in the cloud native ecosystem. -* [Informatik Computer Cloud (ICC)](https://icc.informatik.haw-hamburg.de/) at the Hamburg - University of Applied Sciences to provide a seamless, fast, flexible and reliable storage service - to its staff and students. -* [Gini](https://gini.net/en/) uses Ceph with Rook to provide a redundant and stable S3-compatible - storage infrastructure for their services in order to provide the world's most advanced digital - everyday assistant to their users. 
-* [Cloudways](https://www.cloudways.com/en/) utilizes the flexibility of Rook's orchestration of - Ceph for their users, taking advantage of the fast I/O with block storage as well as multiple - readers and writers of shared filesystem storage. -* [Crowdfox](https://www.crowdfox.com/crowdfox.html) believes in strong community projects and are - therefore putting their bets on Rook. They were able to seamlessly migrate VMs between host nodes - with zero downtime thanks to Rook. -* [Radio Sound](https://radiosound.com/) uses Rook to power their website and GitLab for their CI/CD - pipeline, because of the truly cloud-native experience, like *"a little drop of the Google magic - in our own server rack"*. -* [CyCore Systems](https://www.cycoresys.com/) -* [Datacom](http://datacom.co.nz/Home.aspx) -* [Turtle Network (BLACK TURTLE BVBA)](https://www.turtlenetwork.eu/#home) -* [LeanNet Ltd.](https://leannet.eu/) -* [FHE3 GmbH](https://www.fhe3.com/) -* [infraBuilder](https://infrabuilder.com/) -* [GreenCom Networks](http://www.greencom-networks.com/en) - -There are many additional adopters of Rook in the evaluating phase that will be added to this list -as they transition to production deployments. diff --git a/CODE-OWNERS b/CODE-OWNERS index 8ae1d8a80..45d2cdfc6 100644 --- a/CODE-OWNERS +++ b/CODE-OWNERS @@ -11,17 +11,6 @@ areas: - travisn - galexrt - jbw976 - ceph: - approvers: - - travisn - - BlaineEXE - - leseb - - satoru-takeuchi - reviewers: - - Madhu-1 cassandra: approvers: - yanniszark - nfs: - reviewers: - - rohan47 diff --git a/Documentation/README.md b/Documentation/README.md index 12aaf9193..cde499d6a 100644 --- a/Documentation/README.md +++ b/Documentation/README.md @@ -12,17 +12,11 @@ We plan to continue adding support for other storage systems and environments ba ## Quick Start Guides Starting Rook in your cluster is as simple as a few `kubectl` commands depending on the storage provider. -See our [Quickstart](quickstart.md) guide list for the detailed instructions for each storage provider. +See our [Quickstart](quickstart.md) guide list for the detailed instructions to install the Cassandra operator. ## Storage Provider Designs -High-level Storage Provider design documents: - -| Storage Provider | Status | Description | -| ----------------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [Ceph](ceph-storage.md) | Stable | Ceph is a highly scalable distributed storage solution for block storage, object storage, and shared filesystems with years of production deployments. | - -Low level design documentation for supported list of storage systems collected at [design docs](https://github.com/rook/rook/tree/master/design) section. +Low level design documentation for supported list of storage systems collected at [design docs](https://github.com/rook/cassandra/tree/master/design) section. ## Need help? Be sure to join the Rook Slack diff --git a/Documentation/admission-controller-usage.md b/Documentation/admission-controller-usage.md deleted file mode 100644 index 2cfbf73d4..000000000 --- a/Documentation/admission-controller-usage.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Admission Controller -weight: 2030 -indent: true ---- - -# Admission Controller - -An admission controller intercepts requests to the Kubernetes API server prior to persistence of the object, but after the request is authenticated and authorized. 
- -Enabling the Rook admission controller is recommended to provide an additional level of validation that Rook is configured correctly with the custom resource (CR) settings. - -## Quick Start - -To deploy the Rook admission controllers we have a helper script that will automate the configuration. - -This script will help us achieve the following tasks -1. Creates certificate using cert-manager. -2. Creates ValidatingWebhookConfig and fills the CA bundle with the appropriate value from the cluster. - -Run the following commands: -```console -kubectl create -f cluster/examples/kubernetes/ceph/crds.yaml -f cluster/examples/kubernetes/ceph/common.yaml -tests/scripts/deploy_admission_controller.sh -``` -Now that the Secrets have been deployed, we can deploy the operator: -```console -kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml -``` - -At this point the operator will start the admission controller Deployment automatically and the Webhook will start intercepting requests for Rook resources. diff --git a/Documentation/cassandra-cluster-crd.md b/Documentation/cassandra-cluster-crd.md index 5d6be72cc..4ab893861 100644 --- a/Documentation/cassandra-cluster-crd.md +++ b/Documentation/cassandra-cluster-crd.md @@ -1,13 +1,13 @@ --- title: Cassandra Cluster CRD -weight: 5000 +weight: 3000 --- # Cassandra Cluster CRD Cassandra database clusters can be created and configuring using the `clusters.cassandra.rook.io` custom resource definition (CRD). -Please refer to the the [user guide walk-through](cassandra.md) for complete instructions. +Please refer to the the [user guide walk-through](quickstart.md) for complete instructions. This page will explain all the available configuration options on the Cassandra CRD. ## Sample @@ -85,7 +85,7 @@ In the Cassandra model, each cluster contains datacenters and each datacenter co * `name`: Name of the rack. Usually, a rack corresponds to an availability zone. * `members`: Number of Cassandra members for the specific rack. (In Cassandra documentation, they are called nodes. We don't call them nodes to avoid confusion as a Cassandra Node corresponds to a Kubernetes Pod, not a Kubernetes Node). * `storage`: Defines the volumes to use for each Cassandra member. Currently, only 1 volume is supported. -* `jmxExporterConfigMapName`: Name of configmap that will be used for [jmx_exporter](https://github.com/prometheus/jmx_exporter). Exporter listens on port 9180. If the name not specified, the exporter will not be run. +* `jmxExporterConfigMapName`: Name of configmap that will be used for [jmx_exporter](https://github.com/prometheus/jmx_exporter). Exporter listens on port 9180. If the name not specified, the exporter will not be run. * `resources`: Defines the CPU and RAM resources for the Cassandra Pods. * `annotations`: Key value pair list of annotations to add. * `placement`: Defines the placement of Cassandra Pods. 
Has the following subfields: diff --git a/Documentation/cassandra-operator-upgrade.md b/Documentation/cassandra-operator-upgrade.md index bb90a2001..900b893b8 100644 --- a/Documentation/cassandra-operator-upgrade.md +++ b/Documentation/cassandra-operator-upgrade.md @@ -1,7 +1,6 @@ --- -title: Upgrade -weight: 5100 -indent: true +title: Cassandra Upgrade +weight: 6000 --- # Cassandra Operator Upgrades diff --git a/Documentation/cassandra.md b/Documentation/cassandra.md deleted file mode 100644 index 70a5b56a4..000000000 --- a/Documentation/cassandra.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: Cassandra -weight: 250 -indent: true ---- -{% include_relative branch.liquid %} - -# Cassandra Quickstart - -[Cassandra](http://cassandra.apache.org/) is a highly available, fault tolerant, peer-to-peer NoSQL database featuring lightning fast performance and tunable consistency. It provides massive scalability with no single point of failure. - -[Scylla](https://www.scylladb.com) is a close-to-the-hardware rewrite of Cassandra in C++. It features a shared nothing architecture that enables true linear scaling and major hardware optimizations that achieve ultra-low latencies and extreme throughput. It is a drop-in replacement for Cassandra and uses the same interfaces, so it is also supported by Rook. - -## Prerequisites - -A Kubernetes cluster (v1.16 or higher) is necessary to run the Rook Cassandra operator. -To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [follow these instructions](k8s-pre-reqs.md) (the flexvolume plugin is not necessary for Cassandra) - -## Deploy Cassandra Operator - -First deploy the Rook Cassandra Operator using the following commands: - -```console -$ git clone --single-branch --branch v1.7.2 https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/cassandra -kubectl apply -f crds.yaml -kubectl apply -f operator.yaml -``` - -This will install the operator in namespace rook-cassandra-system. You can check if the operator is up and running with: - -```console -kubectl -n rook-cassandra-system get pod -``` - -## Create and Initialize a Cassandra/Scylla Cluster - -Now that the operator is running, we can create an instance of a Cassandra/Scylla cluster by creating an instance of the `clusters.cassandra.rook.io` resource. -Some of that resource's values are configurable, so feel free to browse `cluster.yaml` and tweak the settings to your liking. -Full details for all the configuration options can be found in the [Cassandra Cluster CRD documentation](cassandra-cluster-crd.md). - -When you are ready to create a Cassandra cluster, simply run: - -```console -kubectl create -f cluster.yaml -``` - -We can verify that a Kubernetes object has been created that represents our new Cassandra cluster with the command below. -This is important because it shows that Rook has successfully extended Kubernetes to make Cassandra clusters a first class citizen in the Kubernetes cloud-native environment. - -```console -kubectl -n rook-cassandra get clusters.cassandra.rook.io -``` - -To check if all the desired members are running, you should see the same number of entries from the following command as the number of members that was specified in `cluster.yaml`: - -```console -kubectl -n rook-cassandra get pod -l app=rook-cassandra -``` - -You can also track the state of a Cassandra cluster from its status. 
To check the current status of a Cluster, run: - -```console -kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra -``` - -## Accessing the Database - -* From kubectl: - -To get a `cqlsh` shell in your new Cluster: - -```console -kubectl exec -n rook-cassandra -it rook-cassandra-east-1-east-1a-0 -- cqlsh -> DESCRIBE KEYSPACES; -``` - -* From inside a Pod: - -When you create a new Cluster, Rook automatically creates a Service for the clients to use in order to access the Cluster. The service's name follows the convention `-client`. You can see this Service in you cluster by running: - -```console -kubectl -n rook-cassandra describe service rook-cassandra-client -``` - -Pods running inside the Kubernetes cluster can use this Service to connect to Cassandra. -Here's an example using the [Python Driver](https://github.com/datastax/python-driver): - -```python -from cassandra.cluster import Cluster - -cluster = Cluster(['rook-cassandra-client.rook-cassandra.svc.cluster.local']) -session = cluster.connect() -``` - -## Scale Up - -The operator supports scale up of a rack as well as addition of new racks. To make the changes, you can use: - -```console -kubectl edit clusters.cassandra.rook.io rook-cassandra -``` - -* To scale up a rack, change the `Spec.Members` field of the rack to the desired value. -* To add a new rack, append the `racks` list with a new rack. Remember to choose a different rack name for the new rack. -* After editing and saving the yaml, check your cluster's Status and Events for information on what's happening: - -```console -kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra -``` - - -## Scale Down - -The operator supports scale down of a rack. To make the changes, you can use: - -```console -kubectl edit clusters.cassandra.rook.io rook-cassandra -``` - -* To scale down a rack, change the `Spec.Members` field of the rack to the desired value. -* After editing and saving the yaml, check your cluster's Status and Events for information on what's happening: - -```console -kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra -``` - -## Clean Up - -To clean up all resources associated with this walk-through, you can run the commands below. - -> **NOTE**: that this will destroy your database and delete all of its associated data. - -```console -kubectl delete -f cluster.yaml -kubectl delete -f operator.yaml -kubectl delete -f crds.yaml -``` - -## Troubleshooting - -If the cluster does not come up, the first step would be to examine the operator's logs: - -```console -kubectl -n rook-cassandra-system logs -l app=rook-cassandra-operator -``` - -If everything looks OK in the operator logs, you can also look in the logs for one of the Cassandra instances: - -```console -kubectl -n rook-cassandra logs rook-cassandra-0 -``` - -## Cassandra Monitoring - -To enable jmx_exporter for cassandra rack, you should specify `jmxExporterConfigMapName` option for rack in CassandraCluster CRD. - -For example: -```yaml -apiVersion: cassandra.rook.io/v1alpha1 -kind: Cluster -metadata: - name: my-cassandra - namespace: rook-cassandra -spec: - ... 
- datacenter: - name: my-datacenter - racks: - - name: my-rack - members: 3 - jmxExporterConfigMapName: jmx-exporter-settings - storage: - volumeClaimTemplates: - - metadata: - name: rook-cassandra-data - spec: - storageClassName: my-storage-class - resources: - requests: - storage: 200Gi -``` - -Simple config map example to get all metrics: -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: jmx-exporter-settings - namespace: rook-cassandra -data: - jmx_exporter_config.yaml: | - lowercaseOutputLabelNames: true - lowercaseOutputName: true - whitelistObjectNames: ["org.apache.cassandra.metrics:*"] -``` - -ConfigMap's data field must contain `jmx_exporter_config.yaml` key with jmx exporter settings. - -There is no automatic reloading mechanism for pods when the config map updated. -After the configmap changed, you should restart all rack pods manually: - -```bash -NAMESPACE= -CLUSTER= -RACKS=$(kubectl get sts -n ${NAMESPACE} -l "cassandra.rook.io/cluster=${CLUSTER}") -echo ${RACKS} | xargs -n1 kubectl rollout restart -n ${NAMESPACE} -``` diff --git a/Documentation/ceph-advanced-configuration.md b/Documentation/ceph-advanced-configuration.md deleted file mode 100644 index 906f3c1d4..000000000 --- a/Documentation/ceph-advanced-configuration.md +++ /dev/null @@ -1,592 +0,0 @@ ---- -title: Advanced Configuration -weight: 11300 -indent: true ---- - -# Advanced Configuration - -These examples show how to perform advanced configuration tasks on your Rook -storage cluster. - -* [Prerequisites](#prerequisites) -* [Using alternate namespaces](#using-alternate-namespaces) -* [Deploying a second cluster](#deploying-a-second-cluster) -* [Use custom Ceph user and secret for mounting](#use-custom-ceph-user-and-secret-for-mounting) -* [Log Collection](#log-collection) -* [OSD Information](#osd-information) -* [Separate Storage Groups](#separate-storage-groups) -* [Configuring Pools](#configuring-pools) -* [Custom ceph.conf Settings](#custom-cephconf-settings) -* [OSD CRUSH Settings](#osd-crush-settings) -* [OSD Dedicated Network](#osd-dedicated-network) -* [Phantom OSD Removal](#phantom-osd-removal) -* [Change Failure Domain](#change-failure-domain) - -## Prerequisites - -Most of the examples make use of the `ceph` client command. A quick way to use -the Ceph client suite is from a [Rook Toolbox container](ceph-toolbox.md). - -The Kubernetes based examples assume Rook OSD pods are in the `rook-ceph` namespace. -If you run them in a different namespace, modify `kubectl -n rook-ceph [...]` to fit -your situation. - -## Using alternate namespaces - -If you wish to deploy the Rook Operator and/or Ceph clusters to namespaces other than the default -`rook-ceph`, the manifests are commented to allow for easy `sed` replacements. Change -`ROOK_CLUSTER_NAMESPACE` to tailor the manifests for additional Ceph clusters. You can choose -to also change `ROOK_OPERATOR_NAMESPACE` to create a new Rook Operator for each Ceph cluster (don't -forget to set `ROOK_CURRENT_NAMESPACE_ONLY`), or you can leave it at the same value for every -Ceph cluster if you only wish to have one Operator manage all Ceph clusters. - -This will help you manage namespaces more easily, but you should still make sure the resources are -configured to your liking. 
- -```sh -cd cluster/examples/kubernetes/ceph - -export ROOK_OPERATOR_NAMESPACE="rook-ceph" -export ROOK_CLUSTER_NAMESPACE="rook-ceph" - -sed -i.bak \ - -e "s/\(.*\):.*# namespace:operator/\1: $ROOK_OPERATOR_NAMESPACE # namespace:operator/g" \ - -e "s/\(.*\):.*# namespace:cluster/\1: $ROOK_CLUSTER_NAMESPACE # namespace:cluster/g" \ - -e "s/\(.*serviceaccount\):.*:\(.*\) # serviceaccount:namespace:operator/\1:$ROOK_OPERATOR_NAMESPACE:\2 # serviceaccount:namespace:operator/g" \ - -e "s/\(.*serviceaccount\):.*:\(.*\) # serviceaccount:namespace:cluster/\1:$ROOK_CLUSTER_NAMESPACE:\2 # serviceaccount:namespace:cluster/g" \ - -e "s/\(.*\): [-_A-Za-z0-9]*\.\(.*\) # driver:namespace:operator/\1: $ROOK_OPERATOR_NAMESPACE.\2 # driver:namespace:operator/g" \ - -e "s/\(.*\): [-_A-Za-z0-9]*\.\(.*\) # driver:namespace:cluster/\1: $ROOK_CLUSTER_NAMESPACE.\2 # driver:namespace:cluster/g" \ - common.yaml operator.yaml cluster.yaml # add other files or change these as desired for your config - -# You need to use `apply` for all Ceph clusters after the first if you have only one Operator -kubectl apply -f common.yaml -f operator.yaml -f cluster.yaml # add other files as desired for yourconfig -``` - -## Deploying a second cluster - -If you wish to create a new CephCluster in a different namespace than `rook-ceph` while using a single operator to manage both clusters execute the following: - -```sh -cd cluster/examples/kubernetes/ceph - -NAMESPACE=rook-ceph-secondary envsubst < common-second-cluster.yaml | kubectl create -f - -``` - -This will create all the necessary RBACs as well as the new namespace. The script assumes that `common.yaml` was already created. -When you create the second CephCluster CR, use the same `NAMESPACE` and the operator will configure the second cluster. - -## Use custom Ceph user and secret for mounting - -> **NOTE**: For extensive info about creating Ceph users, consult the Ceph documentation: https://docs.ceph.com/en/latest/rados/operations/user-management/#add-a-user. - -Using a custom Ceph user and secret can be done for filesystem and block storage. - -Create a custom user in Ceph with read-write access in the `/bar` directory on CephFS: - -```console -$ ceph auth get-or-create-key client.user1 mon 'allow r' osd 'allow rw tag cephfs data=YOUR_FS_DATA_POOL' mds 'allow r, allow rw path=/bar' -``` - -The command will return a Ceph secret key, this key should be added as a secret in Kubernetes like this: - -```console -$ kubectl create secret generic ceph-user1-secret --from-literal=key=YOUR_CEPH_KEY -``` - -> **NOTE**: This secret with the same name must be created in each namespace where the StorageClass will be used. - -In addition to this Secret you must create a RoleBinding to allow the Rook Ceph agent to get the secret from each namespace. -The RoleBinding is optional if you are using a ClusterRoleBinding for the Rook Ceph agent secret access. -A ClusterRole which contains the permissions which are needed and used for the Bindings are shown as an example after the next step. - -On a StorageClass `parameters` and/or flexvolume Volume entry `options` set the following options: - -```yaml -mountUser: user1 -mountSecret: ceph-user1-secret -``` - -If you want the Rook Ceph agent to require a `mountUser` and `mountSecret` to be set in StorageClasses using Rook, you must set the environment variable `AGENT_MOUNT_SECURITY_MODE` to `Restricted` on the Rook Ceph operator Deployment. 
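> **NOTE**: The commands below are an illustrative sketch, not part of the original guide. They assume the operator runs as the default `rook-ceph-operator` Deployment in the `rook-ceph` namespace; adjust both names if your installation differs.

```console
# Set the mount security mode on the operator Deployment and let it restart.
kubectl -n rook-ceph set env deployment/rook-ceph-operator AGENT_MOUNT_SECURITY_MODE=Restricted

# Confirm the environment variable is now part of the Deployment spec.
kubectl -n rook-ceph set env deployment/rook-ceph-operator --list | grep AGENT_MOUNT_SECURITY_MODE
```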
- -For more information on using the Ceph feature to limit access to CephFS paths, see [Ceph Documentation - Path Restriction](https://docs.ceph.com/en/latest/cephfs/client-auth/#path-restriction). - -### ClusterRole - -> **NOTE**: When you are using the Helm chart to install the Rook Ceph operator and have set `mountSecurityMode` to e.g., `Restricted`, then the below ClusterRole has already been created for you. - -**This ClusterRole is needed no matter if you want to use a RoleBinding per namespace or a ClusterRoleBinding.** - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-ceph-agent-mount - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - get -``` - -### RoleBinding - -> **NOTE**: You either need a RoleBinding in each namespace in which a mount secret resides in or create a ClusterRoleBinding with which the Rook Ceph agent -> has access to Kubernetes secrets in all namespaces. - -Create the RoleBinding shown here in each namespace the Rook Ceph agent should read secrets for mounting. -The RoleBinding `subjects`' `namespace` must be the one the Rook Ceph agent runs in (default `rook-ceph` for version 1.0 and newer. The default namespace in -previous versions was `rook-ceph-system`). - -Replace `namespace: name-of-namespace-with-mountsecret` according to the name of all namespaces a `mountSecret` can be in. - -```yaml -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-agent-mount - namespace: name-of-namespace-with-mountsecret - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-agent-mount -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph -``` - -### ClusterRoleBinding - -This ClusterRoleBinding only needs to be created once, as it covers the whole cluster. - -```yaml -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-agent-mount - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-agent-mount -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph -``` - -## Log Collection - -All Rook logs can be collected in a Kubernetes environment with the following command: - -```console -for p in $(kubectl -n rook-ceph get pods -o jsonpath='{.items[*].metadata.name}') -do - for c in $(kubectl -n rook-ceph get pod ${p} -o jsonpath='{.spec.containers[*].name}') - do - echo "BEGIN logs from pod: ${p} ${c}" - kubectl -n rook-ceph logs -c ${c} ${p} - echo "END logs from pod: ${p} ${c}" - done -done -``` - -This gets the logs for every container in every Rook pod and then compresses them into a `.gz` archive -for easy sharing. Note that instead of `gzip`, you could instead pipe to `less` or to a single text file. - -## OSD Information - -Keeping track of OSDs and their underlying storage devices can be -difficult. The following scripts will clear things up quickly. 
- -### Kubernetes - -```console -# Get OSD Pods -# This uses the example/default cluster name "rook" -OSD_PODS=$(kubectl get pods --all-namespaces -l \ - app=rook-ceph-osd,rook_cluster=rook-ceph -o jsonpath='{.items[*].metadata.name}') - -# Find node and drive associations from OSD pods -for pod in $(echo ${OSD_PODS}) -do - echo "Pod: ${pod}" - echo "Node: $(kubectl -n rook-ceph get pod ${pod} -o jsonpath='{.spec.nodeName}')" - kubectl -n rook-ceph exec ${pod} -- sh -c '\ - for i in /var/lib/ceph/osd/ceph-*; do - [ -f ${i}/ready ] || continue - echo -ne "-$(basename ${i}) " - echo $(lsblk -n -o NAME,SIZE ${i}/block 2> /dev/null || \ - findmnt -n -v -o SOURCE,SIZE -T ${i}) $(cat ${i}/type) - done | sort -V - echo' -done -``` - -The output should look something like this. - ->``` ->Pod: osd-m2fz2 ->Node: node1.zbrbdl ->-osd0 sda3 557.3G bluestore ->-osd1 sdf3 110.2G bluestore ->-osd2 sdd3 277.8G bluestore ->-osd3 sdb3 557.3G bluestore ->-osd4 sde3 464.2G bluestore ->-osd5 sdc3 557.3G bluestore -> ->Pod: osd-nxxnq ->Node: node3.zbrbdl ->-osd6 sda3 110.7G bluestore ->-osd17 sdd3 1.8T bluestore ->-osd18 sdb3 231.8G bluestore ->-osd19 sdc3 231.8G bluestore -> ->Pod: osd-tww1h ->Node: node2.zbrbdl ->-osd7 sdc3 464.2G bluestore ->-osd8 sdj3 557.3G bluestore ->-osd9 sdf3 66.7G bluestore ->-osd10 sdd3 464.2G bluestore ->-osd11 sdb3 147.4G bluestore ->-osd12 sdi3 557.3G bluestore ->-osd13 sdk3 557.3G bluestore ->-osd14 sde3 66.7G bluestore ->-osd15 sda3 110.2G bluestore ->-osd16 sdh3 135.1G bluestore ->``` - - -## Separate Storage Groups - -> **DEPRECATED**: Instead of manually needing to set this, the `deviceClass` property can be used on Pool structures in `CephBlockPool`, `CephFilesystem` and `CephObjectStore` CRD objects. - -By default Rook/Ceph puts all storage under one replication rule in the CRUSH -Map which provides the maximum amount of storage capacity for a cluster. If you -would like to use different storage endpoints for different purposes, you'll -have to create separate storage groups. - -In the following example we will separate SSD drives from spindle-based drives, -a common practice for those looking to target certain workloads onto faster -(database) or slower (file archive) storage. - -## Configuring Pools - -### Placement Group Sizing - -> **NOTE**: Since Ceph Nautilus (v14.x), you can use the Ceph MGR `pg_autoscaler` -> module to auto scale the PGs as needed. If you want to enable this feature, -> please refer to [Default PG and PGP counts](ceph-configuration.md#default-pg-and-pgp-counts). - -The general rules for deciding how many PGs your pool(s) should contain is: - -* Less than 5 OSDs set pg_num to 128 -* Between 5 and 10 OSDs set pg_num to 512 -* Between 10 and 50 OSDs set pg_num to 1024 - -If you have more than 50 OSDs, you need to understand the tradeoffs and how to -calculate the pg_num value by yourself. For calculating pg_num yourself please -make use of [the pgcalc tool](http://ceph.com/pgcalc/). - -If you're already using a pool it is generally safe to [increase its PG -count](#setting-pg-count) on-the-fly. Decreasing the PG count is not -recommended on a pool that is in use. The safest way to decrease the PG count -is to back-up the data, [delete the pool](#deleting-a-pool), and [recreate -it](#creating-a-pool). With backups you can try a few potentially unsafe -tricks for live pools, documented -[here](http://cephnotes.ksperis.com/blog/2015/04/15/ceph-pool-migration). 
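To turn those sizing rules into a quick check, the sketch below (an illustrative addition, assuming the `ceph` CLI is available, for example from the toolbox pod) counts the OSDs and prints the suggested pg_num bracket without changing any pool:

```console
# Count OSDs and echo the pg_num bracket suggested by the rules above.
OSD_COUNT=$(ceph osd ls | wc -l)
if [ "${OSD_COUNT}" -lt 5 ]; then
  PG_NUM=128
elif [ "${OSD_COUNT}" -le 10 ]; then
  PG_NUM=512
elif [ "${OSD_COUNT}" -le 50 ]; then
  PG_NUM=1024
else
  PG_NUM="(calculate manually, e.g. with the pgcalc tool)"
fi
echo "OSDs: ${OSD_COUNT} -> suggested pg_num: ${PG_NUM}"
```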
- -### Setting PG Count - -Be sure to read the [placement group sizing](#placement-group-sizing) section -before changing the number of PGs. - -```console -# Set the number of PGs in the rbd pool to 512 -ceph osd pool set rbd pg_num 512 -``` - -## Custom ceph.conf Settings - -> **WARNING**: The advised method for controlling Ceph configuration is to manually use the Ceph CLI -> or the Ceph dashboard because this offers the most flexibility. It is highly recommended that this -> only be used when absolutely necessary and that the `config` be reset to an empty string if/when the -> configurations are no longer necessary. Configurations in the config file will make the Ceph cluster -> less configurable from the CLI and dashboard and may make future tuning or debugging difficult. - -Setting configs via Ceph's CLI requires that at least one mon be available for the configs to be -set, and setting configs via dashboard requires at least one mgr to be available. Ceph may also have -a small number of very advanced settings that aren't able to be modified easily via CLI or -dashboard. In order to set configurations before monitors are available or to set problematic -configuration settings, the `rook-config-override` ConfigMap exists, and the `config` field can be -set with the contents of a `ceph.conf` file. The contents will be propagated to all mon, mgr, OSD, -MDS, and RGW daemons as an `/etc/ceph/ceph.conf` file. - -> **WARNING**: Rook performs no validation on the config, so the validity of the settings is the -> user's responsibility. - -If the `rook-config-override` ConfigMap is created before the cluster is started, the Ceph daemons -will automatically pick up the settings. If you add the settings to the ConfigMap after the cluster -has been initialized, each daemon will need to be restarted where you want the settings applied: - -* mons: ensure all three mons are online and healthy before restarting each mon pod, one at a time. -* mgrs: the pods are stateless and can be restarted as needed, but note that this will disrupt the - Ceph dashboard during restart. -* OSDs: restart your the pods by deleting them, one at a time, and running `ceph -s` -between each restart to ensure the cluster goes back to "active/clean" state. -* RGW: the pods are stateless and can be restarted as needed. -* MDS: the pods are stateless and can be restarted as needed. - -After the pod restart, the new settings should be in effect. Note that if the ConfigMap in the Ceph -cluster's namespace is created before the cluster is created, the daemons will pick up the settings -at first launch. - -### Example - -In this example we will set the default pool `size` to two, and tell OSD -daemons not to change the weight of OSDs on startup. - -> **WARNING**: Modify Ceph settings carefully. You are leaving the sandbox tested by Rook. -> Changing the settings could result in unhealthy daemons or even data loss if used incorrectly. - -When the Rook Operator creates a cluster, a placeholder ConfigMap is created that -will allow you to override Ceph configuration settings. When the daemon pods are started, the -settings specified in this ConfigMap will be merged with the default settings -generated by Rook. - -The default override settings are blank. 
Cutting out the extraneous properties, -we would see the following defaults after creating a cluster: - -```console -kubectl -n rook-ceph get ConfigMap rook-config-override -o yaml -``` - -```yaml -kind: ConfigMap -apiVersion: v1 -metadata: - name: rook-config-override - namespace: rook-ceph -data: - config: "" -``` - -To apply your desired configuration, you will need to update this ConfigMap. The next time the -daemon pod(s) start, they will use the updated configs. - -```console -kubectl -n rook-ceph edit configmap rook-config-override -``` - -Modify the settings and save. Each line you add should be indented from the `config` property as such: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: rook-config-override - namespace: rook-ceph -data: - config: | - [global] - osd crush update on start = false - osd pool default size = 2 -``` - -## OSD CRUSH Settings - -A useful view of the [CRUSH Map](http://docs.ceph.com/docs/master/rados/operations/crush-map/) -is generated with the following command: - -```console -ceph osd tree -``` - -In this section we will be tweaking some of the values seen in the output. - -### OSD Weight - -The CRUSH weight controls the ratio of data that should be distributed to each -OSD. This also means a higher or lower amount of disk I/O operations for an OSD -with higher/lower weight, respectively. - -By default OSDs get a weight relative to their storage capacity, which maximizes -overall cluster capacity by filling all drives at the same rate, even if drive -sizes vary. This should work for most use-cases, but the following situations -could warrant weight changes: - -* Your cluster has some relatively slow OSDs or nodes. Lowering their weight can - reduce the impact of this bottleneck. -* You're using bluestore drives provisioned with Rook v0.3.1 or older. In this - case you may notice OSD weights did not get set relative to their storage - capacity. Changing the weight can fix this and maximize cluster capacity. - -This example sets the weight of osd.0 which is 600GiB - -```console -ceph osd crush reweight osd.0 .600 -``` - -### OSD Primary Affinity - -When pools are set with a size setting greater than one, data is replicated -between nodes and OSDs. For every chunk of data a Primary OSD is selected to be -used for reading that data to be sent to clients. You can control how likely it -is for an OSD to become a Primary using the Primary Affinity setting. This is -similar to the OSD weight setting, except it only affects reads on the storage -device, not capacity or writes. - -In this example we will make sure `osd.0` is only selected as Primary if all -other OSDs holding replica data are unavailable: - -```console -ceph osd primary-affinity osd.0 0 -``` - -## OSD Dedicated Network - -It is possible to configure ceph to leverage a dedicated network for the OSDs to -communicate across. A useful overview is the [CEPH Networks](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref/#ceph-networks) -section of the Ceph documentation. If you declare a cluster network, OSDs will -route heartbeat, object replication and recovery traffic over the cluster -network. This may improve performance compared to using a single network. - -Two changes are necessary to the configuration to enable this capability: - -### Use hostNetwork in the rook ceph cluster configuration - -Enable the `hostNetwork` setting in the [Ceph Cluster CRD configuration](https://rook.io/docs/rook/master/ceph-cluster-crd.html#samples). 
-For example, - -```yaml - network: - provider: host -``` - -> IMPORTANT: Changing this setting is not supported in a running Rook cluster. Host networking -> should be configured when the cluster is first created. - -### Define the subnets to use for public and private OSD networks - -Edit the `rook-config-override` configmap to define the custom network -configuration: - -```console -kubectl -n rook-ceph edit configmap rook-config-override -``` - -In the editor, add a custom configuration to instruct ceph which subnet is the -public network and which subnet is the private network. For example: - -```yaml -apiVersion: v1 -data: - config: | - [global] - public network = 10.0.7.0/24 - cluster network = 10.0.10.0/24 - public addr = "" - cluster addr = "" -``` - -After applying the updated rook-config-override configmap, it will be necessary -to restart the OSDs by deleting the OSD pods in order to apply the change. -Restart the OSD pods by deleting them, one at a time, and running ceph -s -between each restart to ensure the cluster goes back to "active/clean" state. - -## Phantom OSD Removal - -If you have OSDs in which are not showing any disks, you can remove those "Phantom OSDs" by following the instructions below. -To check for "Phantom OSDs", you can run: - -```console -ceph osd tree -``` - -An example output looks like this: - ->``` ->ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF ->-1 57.38062 root default ->-13 7.17258 host node1.example.com ->2 hdd 3.61859 osd.2 up 1.00000 1.00000 ->-7 0 host node2.example.com down 0 1.00000 ->``` - -The host `node2.example.com` in the output has no disks, so it is most likely a "Phantom OSD". - -Now to remove it, use the ID in the first column of the output and replace `` with it. In the example output above the ID would be `-7`. -The commands are: - -```console -$ ceph osd out -$ ceph osd crush remove osd. -$ ceph auth del osd. -$ ceph osd rm -``` - -To recheck that the Phantom OSD was removed, re-run the following command and check if the OSD with the ID doesn't show up anymore: - -```console -ceph osd tree -``` - -## Change Failure Domain - -In Rook, it is now possible to indicate how the default CRUSH failure domain rule must be configured in order to ensure that replicas or erasure code shards are separated across hosts, and a single host failure does not affect availability. For instance, this is an example manifest of a block pool named `replicapool` configured with a `failureDomain` set to `osd`: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook -spec: - # The failure domain will spread the replicas of the data across different failure zones - failureDomain: osd - ... -``` - -However, due to several reasons, we may need to change such failure domain to its other value: `host`. Unfortunately, changing it directly in the YAML manifest is not currently handled by Rook, so we need to perform the change directly using Ceph commands using the Rook tools pod, for instance: - -```console -ceph osd pool get replicapool crush_rule -``` - ->``` ->crush_rule: replicapool ->``` - -```console -ceph osd crush rule create-replicated replicapool_host_rule default host -``` - -Notice that the suffix `host_rule` in the name of the rule is just for clearness about the type of rule we are creating here, and can be anything else as long as it is different from the existing one. 
Once the new rule has been created, we simply apply it to our block pool: - -```console -ceph osd pool set replicapool crush_rule replicapool_host_rule -``` - -And validate that it has been actually applied properly: - -```console -ceph osd pool get replicapool crush_rule -``` ->``` -> crush_rule: replicapool_host_rule ->``` - -If the cluster's health was `HEALTH_OK` when we performed this change, immediately, the new rule is applied to the cluster transparently without service disruption. - -Exactly the same approach can be used to change from `host` back to `osd`. diff --git a/Documentation/ceph-block.md b/Documentation/ceph-block.md deleted file mode 100644 index 62920ece6..000000000 --- a/Documentation/ceph-block.md +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: Block Storage -weight: 2100 -indent: true ---- -{% include_relative branch.liquid %} - -# Block Storage - -Block storage allows a single pod to mount storage. This guide shows how to create a simple, multi-tier web application on Kubernetes using persistent volumes enabled by Rook. - -## Prerequisites - -This guide assumes a Rook cluster as explained in the [Quickstart](ceph-quickstart.md). - -## Provision Storage - -Before Rook can provision storage, a [`StorageClass`](https://kubernetes.io/docs/concepts/storage/storage-classes) and [`CephBlockPool`](ceph-pool-crd.md) need to be created. This will allow Kubernetes to interoperate with Rook when provisioning persistent volumes. - -> **NOTE**: This sample requires *at least 1 OSD per node*, with each OSD located on *3 different nodes*. - -Each OSD must be located on a different node, because the [`failureDomain`](ceph-pool-crd.md#spec) is set to `host` and the `replicated.size` is set to `3`. - -> **NOTE**: This example uses the CSI driver, which is the preferred driver going forward for K8s 1.13 and newer. Examples are found in the [CSI RBD](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd) directory. For an example of a storage class using the flex driver (required for K8s 1.12 or earlier), see the [Flex Driver](#flex-driver) section below, which has examples in the [flex](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/flex) directory. - -Save this `StorageClass` definition as `storageclass.yaml`: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - failureDomain: host - replicated: - size: 3 ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.rbd.csi.ceph.com -parameters: - # clusterID is the namespace where the rook cluster is running - clusterID: rook-ceph - # Ceph pool into which the RBD image shall be created - pool: replicapool - - # (optional) mapOptions is a comma-separated list of map options. - # For krbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options - # For nbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options - # mapOptions: lock_on_read,queue_depth=1024 - - # (optional) unmapOptions is a comma-separated list of unmap options. - # For krbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options - # For nbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options - # unmapOptions: force - - # RBD image format. Defaults to "2". 
- imageFormat: "2" - - # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. - imageFeatures: layering - - # The secrets contain Ceph admin credentials. - csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph - - # Specify the filesystem type of the volume. If not specified, csi-provisioner - # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock - # in hyperconverged settings where the volume is mounted on the same node as the osds. - csi.storage.k8s.io/fstype: ext4 - -# Delete the rbd volume when a PVC is deleted -reclaimPolicy: Delete - -# Optional, if you want to add dynamic resize for PVC. Works for Kubernetes 1.14+ -# For now only ext3, ext4, xfs resize support provided, like in Kubernetes itself. -allowVolumeExpansion: true -``` - -If you've deployed the Rook operator in a namespace other than "rook-ceph", -change the prefix in the provisioner to match the namespace -you used. For example, if the Rook operator is running in the namespace "my-namespace" the -provisioner value should be "my-namespace.rbd.csi.ceph.com". - -Create the storage class. - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml -``` - -> **NOTE**: As [specified by Kubernetes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#retain), when using the `Retain` reclaim policy, any Ceph RBD image that is backed by a `PersistentVolume` will continue to exist even after the `PersistentVolume` has been deleted. These Ceph RBD images will need to be cleaned up manually using `rbd rm`. - -## Consume the storage: Wordpress sample - -We create a sample app to consume the block storage provisioned by Rook with the classic wordpress and mysql apps. -Both of these apps will make use of block volumes provisioned by Rook. - -Start mysql and wordpress from the `cluster/examples/kubernetes` folder: - -```console -kubectl create -f mysql.yaml -kubectl create -f wordpress.yaml -``` - -Both of these apps create a block volume and mount it to their respective pod. You can see the Kubernetes volume claims by running the following: - -```console -kubectl get pvc -``` ->``` ->NAME STATUS VOLUME CAPACITY ACCESSMODES AGE ->mysql-pv-claim Bound pvc-95402dbc-efc0-11e6-bc9a-0cc47a3459ee 20Gi RWO 1m ->wp-pv-claim Bound pvc-39e43169-efc1-11e6-bc9a-0cc47a3459ee 20Gi RWO 1m -``` - -Once the wordpress and mysql pods are in the `Running` state, get the cluster IP of the wordpress app and enter it in your browser: - -```console -kubectl get svc wordpress -``` ->``` ->NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE ->wordpress 10.3.0.155 80:30841/TCP 2m ->``` - -You should see the wordpress app running. - -If you are using Minikube, the Wordpress URL can be retrieved with this one-line command: - -```console -echo http://$(minikube ip):$(kubectl get service wordpress -o jsonpath='{.spec.ports[0].nodePort}') -``` - -> **NOTE**: When running in a vagrant environment, there will be no external IP address to reach wordpress with. You will only be able to reach wordpress via the `CLUSTER-IP` from inside the Kubernetes cluster. 
- -## Consume the storage: Toolbox - -With the pool that was created above, we can also create a block image and mount it directly in a pod. See the [Direct Block Tools](direct-tools.md#block-storage-tools) topic for more details. - -## Teardown - -To clean up all the artifacts created by the block demo: - -``` -kubectl delete -f wordpress.yaml -kubectl delete -f mysql.yaml -kubectl delete -n rook-ceph cephblockpools.ceph.rook.io replicapool -kubectl delete storageclass rook-ceph-block -``` - -## Flex Driver - -To create a volume based on the flex driver instead of the CSI driver, see the following example of a storage class. -Make sure the flex driver is enabled over Ceph CSI. -For this, you need to set `ROOK_ENABLE_FLEX_DRIVER` to `true` in your operator deployment in the `operator.yaml` file. -The pool definition is the same as for the CSI driver. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - failureDomain: host - replicated: - size: 3 ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -provisioner: ceph.rook.io/block -parameters: - blockPool: replicapool - # The value of "clusterNamespace" MUST be the same as the one in which your rook cluster exist - clusterNamespace: rook-ceph - # Specify the filesystem type of the volume. If not specified, it will use `ext4`. - fstype: ext4 -# Optional, default reclaimPolicy is "Delete". Other options are: "Retain", "Recycle" as documented in https://kubernetes.io/docs/concepts/storage/storage-classes/ -reclaimPolicy: Retain -# Optional, if you want to add dynamic resize for PVC. Works for Kubernetes 1.14+ -# For now only ext3, ext4, xfs resize support provided, like in Kubernetes itself. -allowVolumeExpansion: true -``` - -Create the pool and storage class using `kubectl`: - -```console -kubectl create -f cluster/examples/kubernetes/ceph/flex/storageclass.yaml -``` - -Continue with the example above for the [wordpress application](#consume-the-storage-wordpress-sample). - -## Advanced Example: Erasure Coded Block Storage - -If you want to use erasure coded pool with RBD, your OSDs must use `bluestore` as their `storeType`. -Additionally the nodes that are going to mount the erasure coded RBD block storage must have Linux kernel >= `4.11`. - -**NOTE**: This example requires *at least 3 bluestore OSDs*, with each OSD located on a *different node*. - -The OSDs must be located on different nodes, because the [`failureDomain`](ceph-pool-crd.md#spec) is set to `host` and the `erasureCoded` chunk settings require at least 3 different OSDs (2 `dataChunks` + 1 `codingChunks`). - -To be able to use an erasure coded pool you need to create two pools (as seen below in the definitions): one erasure coded and one replicated. -> **NOTE**: This example requires *at least 3 bluestore OSDs*, with each OSD located on a *different node*. - -The OSDs must be located on different nodes, because the [`failureDomain`](ceph-pool-crd.md#spec) is set to `host` and the `erasureCoded` chunk settings require at least 3 different OSDs (2 `dataChunks` + 1 `codingChunks`). - -### Erasure Coded CSI Driver - -The erasure coded pool must be set as the `dataPool` parameter in -[`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName -}}/cluster/examples/kubernetes/ceph/csi/rbd/storage-class-ec.yaml) It is used for the data of the RBD images. 
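As an illustration of how the `dataPool` parameter fits together with the regular `pool` parameter, here is a hedged sketch of the relevant CSI storage class parameters; the pool names `replicapool` and `ec-data-pool` are assumptions and should match the replicated and erasure coded pools you actually created:

```yaml
parameters:
  clusterID: rook-ceph
  # Replicated pool that stores the RBD image metadata (assumed name)
  pool: replicapool
  # Erasure coded pool that stores the RBD image data (assumed name)
  dataPool: ec-data-pool
```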
- -### Erasure Coded Flex Driver - -The erasure coded pool must be set as the `dataBlockPool` parameter in -[`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName -}}/cluster/examples/kubernetes/ceph/flex/storage-class-ec.yaml). It is used for -the data of the RBD images. diff --git a/Documentation/ceph-client-crd.md b/Documentation/ceph-client-crd.md deleted file mode 100644 index 6cae6ffcd..000000000 --- a/Documentation/ceph-client-crd.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Client CRD -weight: 3500 -indent: true ---- - -# Ceph Client CRD - -Rook allows creation and updating clients through the custom resource definitions (CRDs). -For more information about user management and capabilities see the [Ceph docs](https://docs.ceph.com/docs/master/rados/operations/user-management/). - -## Use Case - -Use Client CRD in case you want to integrate Rook with with applications that are using LibRBD directly. -For example for OpenStack deployment with Ceph backend use Client CRD to create OpenStack services users. - -The Client CRD is not needed for Flex or CSI driver users. The drivers create the needed users automatically. - -## Creating Ceph User - -To get you started, here is a simple example of a CRD to configure a Ceph client with capabilities. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephClient -metadata: - name: glance - namespace: rook-ceph -spec: - caps: - mon: 'profile rbd' - osd: 'profile rbd pool=images' ---- -apiVersion: ceph.rook.io/v1 -kind: CephClient -metadata: - name: cinder - namespace: rook-ceph -spec: - caps: - mon: 'profile rbd' - osd: 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images' -``` - -### Prerequisites - -This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](ceph-quickstart.md) diff --git a/Documentation/ceph-cluster-crd.md b/Documentation/ceph-cluster-crd.md deleted file mode 100755 index d219c5e3e..000000000 --- a/Documentation/ceph-cluster-crd.md +++ /dev/null @@ -1,1642 +0,0 @@ ---- -title: Cluster CRD -weight: 2600 -indent: true ---- - -# Ceph Cluster CRD - -Rook allows creation and customization of storage clusters through the custom resource definitions (CRDs). -There are primarily three different modes in which to create your cluster. - -1. Specify [host paths and raw devices](#host-based-cluster) -2. Dynamically provision storage underneath Rook by specifying the storage class Rook should use to consume storage [via PVCs](#pvc-based-cluster) -3. Create a [Stretch cluster](#stretch-cluster) that distributes Ceph mons across three zones, while storage (OSDs) is only configured in two zones - -Following is an example for each of these approaches. More examples are included [later in this doc](#samples). - -## Host-based Cluster - -To get you started, here is a simple example of a CRD to configure a Ceph cluster with all nodes and all devices. -The Ceph persistent data is stored directly on a host path (Ceph Mons) and on raw devices (Ceph OSDs). - -> **NOTE**: In addition to your CephCluster object, you need to create the namespace, service accounts, and RBAC rules for the namespace you are going to create the CephCluster in. -> These resources are defined in the example `common.yaml`. 
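For example, assuming the example manifest layout referenced elsewhere in this guide (the exact path may differ by Rook release, and the CRDs may live in a separate `crds.yaml`), these prerequisites could be applied with:

```console
kubectl create -f cluster/examples/kubernetes/ceph/common.yaml
```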
- -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - # see the "Cluster Settings" section below for more details on which image of ceph to run - image: quay.io/ceph/ceph:v16.2.5 - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - storage: - useAllNodes: true - useAllDevices: true - onlyApplyOSDPlacement: false -``` - -## PVC-based Cluster - -In a "PVC-based cluster", the Ceph persistent data is stored on volumes requested from a storage class of your choice. -This type of cluster is recommended in a cloud environment where volumes can be dynamically created and also -in clusters where a local PV provisioner is available. - -> **NOTE**: Kubernetes version 1.13.0 or greater is required to provision OSDs on PVCs. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - # see the "Cluster Settings" section below for more details on which image of ceph to run - image: quay.io/ceph/ceph:v16.2.5 - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - volumeClaimTemplate: - spec: - storageClassName: gp2 - resources: - requests: - storage: 10Gi - storage: - storageClassDeviceSets: - - name: set1 - count: 3 - portable: false - encrypted: false - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2) - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - onlyApplyOSDPlacement: false -``` - -For a more advanced scenario, such as adding a dedicated device you can refer to the [dedicated metadata device for OSD on PVC section](#dedicated-metadata-and-wal-device-for-osd-on-pvc). - -## Stretch Cluster - -For environments that only have two failure domains available where data can be replicated, consider -the case where one failure domain is down and the data is still fully available in the -remaining failure domain. To support this scenario, Ceph has recently integrated support for "stretch" clusters. - -Rook requires three zones. Two zones (A and B) will each run all types of Rook pods, which we call the "data" zones. -Two mons run in each of the two data zones, while two replicas of the data are in each zone for a total of four data replicas. -The third zone (arbiter) runs a single mon. No other Rook or Ceph daemons need to be run in the arbiter zone. - -For this example, we assume the desired failure domain is a zone. Another failure domain can also be specified with a -known [topology node label](#osd-topology) which is already being used for OSD failure domains. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - dataDirHostPath: /var/lib/rook - mon: - # Five mons must be created for stretch mode - count: 5 - allowMultiplePerNode: false - stretchCluster: - failureDomainLabel: topology.kubernetes.io/zone - subFailureDomain: host - zones: - - name: a - arbiter: true - - name: b - - name: c - cephVersion: - # Stretch cluster is supported in Ceph Pacific or newer. - image: quay.io/ceph/ceph:v16.2.5 - allowUnsupported: true - # Either storageClassDeviceSets or the storage section can be specified for creating OSDs. - # This example uses all devices for simplicity. 
- storage: - useAllNodes: true - useAllDevices: true - deviceFilter: "" - # OSD placement is expected to include the non-arbiter zones - placement: - osd: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - b - - c -``` - -For more details, see the [Stretch Cluster design doc](https://github.com/rook/rook/blob/master/design/ceph/ceph-stretch-cluster.md). - -## Settings - -Settings can be specified at the global level to apply to the cluster as a whole, while other settings can be specified at more fine-grained levels. If any setting is unspecified, a suitable default will be used automatically. - -### Cluster metadata - -* `name`: The name that will be used internally for the Ceph cluster. Most commonly the name is the same as the namespace since multiple clusters are not supported in the same namespace. -* `namespace`: The Kubernetes namespace that will be created for the Rook cluster. The services, pods, and other resources created by the operator will be added to this namespace. The common scenario is to create a single Rook cluster. If multiple clusters are created, they must not have conflicting devices or host paths. - -### Cluster Settings - -* `external`: - * `enable`: if `true`, the cluster will not be managed by Rook but via an external entity. This mode is intended to connect to an existing cluster. In this case, Rook will only consume the external cluster. However, Rook will be able to deploy various daemons in Kubernetes such as object gateways, mds and nfs if an image is provided and will refuse otherwise. If this setting is enabled **all** the other options will be ignored except `cephVersion.image` and `dataDirHostPath`. See [external cluster configuration](#external-cluster). If `cephVersion.image` is left blank, Rook will refuse the creation of extra CRs like object, file and nfs. -* `cephVersion`: The version information for launching the ceph daemons. - * `image`: The image used for running the ceph daemons. For example, `quay.io/ceph/ceph:v15.2.12` or `v16.2.5`. For more details read the [container images section](#ceph-container-images). - For the latest ceph images, see the [Ceph DockerHub](https://hub.docker.com/r/ceph/ceph/tags/). - To ensure a consistent version of the image is running across all nodes in the cluster, it is recommended to use a very specific image version. - Tags also exist that would give the latest version, but they are only recommended for test environments. For example, the tag `v14` will be updated each time a new nautilus build is released. - Using the `v14` or similar tag is not recommended in production because it may lead to inconsistent versions of the image running across different nodes in the cluster. - * `allowUnsupported`: If `true`, allow an unsupported major version of the Ceph release. Currently `nautilus`, `octopus`, and `pacific` are supported. Future versions such as `quincy` would require this to be set to `true`. Should be set to `false` in production. -* `dataDirHostPath`: The path on the host ([hostPath](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath)) where config and data should be stored for each of the services. If the directory does not exist, it will be created. Because this directory persists on the host, it will remain after pods are deleted. Following paths and any of their subpaths **must not be used**: `/etc/ceph`, `/rook` or `/var/log/ceph`. 
- * On **Minikube** environments, use `/data/rook`. Minikube boots into a tmpfs but it provides some [directories](https://github.com/kubernetes/minikube/blob/master/site/content/en/docs/handbook/persistent_volumes.md#a-note-on-mounts-persistence-and-minikube-hosts) where files can be persisted across reboots. Using one of these directories will ensure that Rook's data and configuration files are persisted and that enough storage space is available. - * **WARNING**: For test scenarios, if you delete a cluster and start a new cluster on the same hosts, the path used by `dataDirHostPath` must be deleted. Otherwise, stale keys and other config will remain from the previous cluster and the new mons will fail to start. -If this value is empty, each pod will get an ephemeral directory to store their config files that is tied to the lifetime of the pod running on that node. More details can be found in the Kubernetes [empty dir docs](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir). -* `skipUpgradeChecks`: if set to true Rook won't perform any upgrade checks on Ceph daemons during an upgrade. Use this at **YOUR OWN RISK**, only if you know what you're doing. To understand Rook's upgrade process of Ceph, read the [upgrade doc](ceph-upgrade.md#ceph-version-upgrades). -* `continueUpgradeAfterChecksEvenIfNotHealthy`: if set to true Rook will continue the OSD daemon upgrade process even if the PGs are not clean, or continue with the MDS upgrade even the file system is not healthy. -* `dashboard`: Settings for the Ceph dashboard. To view the dashboard in your browser see the [dashboard guide](ceph-dashboard.md). - * `enabled`: Whether to enable the dashboard to view cluster status - * `urlPrefix`: Allows to serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) - * `port`: Allows to change the default port where the dashboard is served - * `ssl`: Whether to serve the dashboard via SSL, ignored on Ceph versions older than `13.2.2` -* `monitoring`: Settings for monitoring Ceph using Prometheus. To enable monitoring on your cluster see the [monitoring guide](ceph-monitoring.md#prometheus-alerts). - * `enabled`: Whether to enable prometheus based monitoring for this cluster - * `externalMgrEndpoints`: external cluster manager endpoints - * `externalMgrPrometheusPort`: external prometheus manager module port. See [external cluster configuration](#external-cluster) for more details. - * `rulesNamespace`: Namespace to deploy prometheusRule. If empty, namespace of the cluster will be used. - Recommended: - * If you have a single Rook Ceph cluster, set the `rulesNamespace` to the same namespace as the cluster or keep it empty. - * If you have multiple Rook Ceph clusters in the same Kubernetes cluster, choose the same namespace to set `rulesNamespace` for all the clusters (ideally, namespace with prometheus deployed). Otherwise, you will get duplicate alerts with duplicate alert definitions. -* `network`: For the network settings for the cluster, refer to the [network configuration settings](#network-configuration-settings) -* `mon`: contains mon related options [mon settings](#mon-settings) -For more details on the mons and when to choose a number other than `3`, see the [mon health doc](ceph-mon-health.md). -* `mgr`: manager top level section - * `count`: set number of ceph managers between `1` to `2`. The default value is 1. This is only needed if plural ceph managers are needed. 
- * `modules`: is the list of Ceph manager modules to enable -* `crashCollector`: The settings for crash collector daemon(s). - * `disable`: is set to `true`, the crash collector will not run on any node where a Ceph daemon runs - * `daysToRetain`: specifies the number of days to keep crash entries in the Ceph cluster. By default the entries are kept indefinitely. -* `logCollector`: The settings for log collector daemon. - * `enabled`: if set to `true`, the log collector will run as a side-car next to each Ceph daemon. The Ceph configuration option `log_to_file` will be turned on, meaning Ceph daemons will log on files in addition to still logging to container's stdout. These logs will be rotated. (default: false) - * `periodicity`: how often to rotate daemon's log. (default: 24h). Specified with a time suffix which may be 'h' for hours or 'd' for days. **Rotating too often will slightly impact the daemon's performance since the signal briefly interrupts the program.** -* `annotations`: [annotations configuration settings](#annotations-and-labels) -* `labels`: [labels configuration settings](#annotations-and-labels) -* `placement`: [placement configuration settings](#placement-configuration-settings) -* `resources`: [resources configuration settings](#cluster-wide-resources-configuration-settings) -* `priorityClassNames`: [priority class names configuration settings](#priority-class-names-configuration-settings) -* `storage`: Storage selection and configuration that will be used across the cluster. Note that these settings can be overridden for specific nodes. - * `useAllNodes`: `true` or `false`, indicating if all nodes in the cluster should be used for storage according to the cluster level storage selection and configuration values. - If individual nodes are specified under the `nodes` field, then `useAllNodes` must be set to `false`. - * `nodes`: Names of individual nodes in the cluster that should have their storage included in accordance with either the cluster level configuration specified above or any node specific overrides described in the next section below. - `useAllNodes` must be set to `false` to use specific nodes and their config. - See [node settings](#node-settings) below. - * `config`: Config settings applied to all OSDs on the node unless overridden by `devices`. See the [config settings](#osd-configuration-settings) below. - * [storage selection settings](#storage-selection-settings) - * [Storage Class Device Sets](#storage-class-device-sets) - * `onlyApplyOSDPlacement`: Whether the placement specific for OSDs is merged with the `all` placement. If `false`, the OSD placement will be merged with the `all` placement. If true, the `OSD placement will be applied` and the `all` placement will be ignored. The placement for OSDs is computed from several different places depending on the type of OSD: - - For non-PVCs: `placement.all` and `placement.osd` - - For PVCs: `placement.all` and inside the storageClassDeviceSet from the `placement` or `preparePlacement` - * `managePodBudgets`: if `true`, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will block eviction of OSDs by default and unblock them safely when drains are detected. 
- * `osdMaintenanceTimeout`: is a duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. - * `manageMachineDisruptionBudgets`: if `true`, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. Only available on OpenShift. - * `machineDisruptionBudgetNamespace`: the namespace in which to watch the MachineDisruptionBudgets. -* `removeOSDsIfOutAndSafeToRemove`: If `true` the operator will remove the OSDs that are down and whose data has been restored to other OSDs. In Ceph terms, the OSDs are `out` and `safe-to-destroy` when they are removed. -* `cleanupPolicy`: [cleanup policy settings](#cleanup-policy) -* `security`: [security settings](#security) - -### Ceph container images - -Official releases of Ceph Container images are available from [Docker Hub](https://hub.docker.com/r/ceph -). - -These are general purpose Ceph container with all necessary daemons and dependencies installed. - -| TAG | MEANING | -| -------------------- | --------------------------------------------------------- | -| vRELNUM | Latest release in this series (e.g., *v14* = Nautilus) | -| vRELNUM.Y | Latest stable release in this stable series (e.g., v14.2) | -| vRELNUM.Y.Z | A specific release (e.g., v14.2.5) | -| vRELNUM.Y.Z-YYYYMMDD | A specific build (e.g., v14.2.5-20191203) | - -A specific will contain a specific release of Ceph as well as security fixes from the Operating System. - -### Mon Settings - -* `count`: Set the number of mons to be started. The number must be odd and between `1` and `9`. If not specified the default is set to `3`. -* `allowMultiplePerNode`: Whether to allow the placement of multiple mons on a single node. Default is `false` for production. Should only be set to `true` in test environments. -* `volumeClaimTemplate`: A `PersistentVolumeSpec` used by Rook to create PVCs - for monitor storage. This field is optional, and when not provided, HostPath - volume mounts are used. The current set of fields from template that are used - are `storageClassName` and the `storage` resource request and limit. The - default storage size request for new PVCs is `10Gi`. Ensure that associated - storage class is configured to use `volumeBindingMode: WaitForFirstConsumer`. - This setting only applies to new monitors that are created when the requested - number of monitors increases, or when a monitor fails and is recreated. An - [example CRD configuration is provided below](#using-pvc-storage-for-monitors). -* `stretchCluster`: The stretch cluster settings that define the zones (or other failure domain labels) across which to configure the cluster. - * `failureDomainLabel`: The label that is expected on each node where the cluster is expected to be deployed. The labels must be found - in the list of well-known [topology labels](#osd-topology). - * `subFailureDomain`: With a zone, the data replicas must be spread across OSDs in the subFailureDomain. The default is `host`. - * `zones`: The failure domain names where the Mons and OSDs are expected to be deployed. There must be **three zones** specified in the list. - This element is always named `zone` even if a non-default `failureDomainLabel` is specified. The elements have two values: - * `name`: The name of the zone, which is the value of the domain label. 
- * `arbiter`: Whether the zone is expected to be the arbiter zone which only runs a single mon. Exactly one zone must be labeled `true`. - The two zones that are not the arbiter zone are expected to have OSDs deployed. - -If these settings are changed in the CRD the operator will update the number of mons during a periodic check of the mon health, which by default is every 45 seconds. - -To change the defaults that the operator uses to determine the mon health and whether to failover a mon, refer to the [health settings](#health-settings). The intervals should be small enough that you have confidence the mons will maintain quorum, while also being long enough to ignore network blips where mons are failed over too often. - -### Mgr Settings - -You can use the cluster CR to enable or disable any manager module. This can be configured like so: - -```yaml -mgr: - modules: - - name: - enabled: true -``` - -Some modules will have special configuration to ensure the module is fully functional after being enabled. Specifically: - -* `pg_autoscaler`: Rook will configure all new pools with PG autoscaling by setting: `osd_pool_default_pg_autoscale_mode = on` - -### Network Configuration Settings - -If not specified, the default SDN will be used. -Configure the network that will be enabled for the cluster and services. - -* `provider`: Specifies the network provider that will be used to connect the network interface. You can choose between `host`, and `multus`. -* `selectors`: List the network selector(s) that will be used associated by a key. -* `ipFamily`: Specifies the network stack Ceph daemons should listen on. -* `dualStack`: Specifies that Ceph daemon should listen on both IPv4 and IPv6 network stacks. - -> **NOTE:** Changing networking configuration after a Ceph cluster has been deployed is NOT -> supported and will result in a non-functioning cluster. - -#### Host Networking - -To use host networking, set `provider: host`. - -#### Multus - -Rook supports addition of public and cluster network for ceph using Multus - -The selector keys are required to be `public` and `cluster` where each represent: - -* `public`: client communications with the cluster (reads/writes) -* `cluster`: internal Ceph replication network - -If you want to learn more, please read -* [Ceph Networking reference](https://docs.ceph.com/docs/master/rados/configuration/network-config-ref/). -* [Multus documentation](https://intel.github.io/multus-cni/doc/how-to-use.html) - -Based on the configuration, the operator will do the following: - - 1. If only the `public` selector is specified, all communication will happen on that network -```yaml - network: - provider: multus - selectors: - public: rook-ceph/rook-public-nw -``` - 2. If only the `cluster` selector is specified, the internal cluster traffic* will happen on that network. All other traffic to mons, OSDs, and other daemons will be on the default network. -```yaml - network: - provider: multus - selectors: - cluster: rook-ceph/rook-cluster-nw -``` - 3. If both `public` and `cluster` selectors are specified the first one will run all the communication network and the second the internal cluster network* -```yaml - network: - provider: multus - selectors: - public: rook-ceph/rook-public-nw - cluster: rook-ceph/rook-cluster-nw -``` - -\* Internal cluster traffic includes OSD heartbeats, data replication, and data recovery - -Only OSD pods will have both Public and Cluster networks attached. 
The rest of the Ceph component pods and CSI pods will only have the Public network attached. -Rook Ceph Operator will not have any networks attached as it proxies the required commands via a sidecar container in the mgr pod. - -In order to work, each selector value must match a `NetworkAttachmentDefinition` object name in Multus. - -For `multus` network provider, an already working cluster with Multus networking is required. Network attachment definition that later will be attached to the cluster needs to be created before the Cluster CRD. -The Network attachment definitions should be using whereabouts cni. -If Rook cannot find the provided Network attachment definition it will fail running the Ceph OSD pods. -You can add the Multus network attachment selection annotation selecting the created network attachment definition on `selectors`. - -A valid NetworkAttachmentDefinition will look like following: - -```yaml -apiVersion: "k8s.cni.cncf.io/v1" -kind: NetworkAttachmentDefinition -metadata: - name: rook-public-nw -spec: - config: '{ - "cniVersion": "0.3.0", - "name": "public-nad", - "type": "macvlan", - "master": "ens5", - "mode": "bridge", - "ipam": { - "type": "whereabouts", - "range": "192.168.1.0/24" - } - }' -``` - -* Ensure that `master` matches the network interface of the host that you want to use. -* Ipam type `whereabouts` is required because it makes sure that all the pods get a unique IP address from the multus network. -* The NetworkAttachmentDefinition should be referenced along with the namespace in which it is present like `public: /`. - e.g., the network attachment definition are in `default` namespace: - ```yaml - public: default/rook-public-nw - cluster: default/rook-cluster-nw - ``` - * This format is required in order to use the NetworkAttachmentDefinition across namespaces. - * In Openshift, to use a NetworkAttachmentDefinition (NAD) across namespaces, the NAD must be deployed in the `default` namespace. The NAD is then referenced with the namespace: `default/rook-public-nw` - -#### Known issues with multus -When a CephFS/RBD volume is mounted in a Pod using cephcsi and then the CSI CephFS/RBD plugin is restarted or terminated (e.g. by restarting or deleting its DaemonSet), all operations on the volume become blocked, even after restarting the CSI pods. The only workaround is to restart the node where the cephcsi plugin pod was restarted. -This issue is tracked [here](https://github.com/rook/rook/issues/8085). - -#### IPFamily - -Provide single-stack IPv4 or IPv6 protocol to assign corresponding addresses to pods and services. This field is optional. Possible inputs are IPv6 and IPv4. Empty value will be treated as IPv4. Kubernetes version should be at least v1.13 to run IPv6. Dual-stack is supported as of ceph Pacific. -To turn on dual stack see the [network configuration section](#network-configuration-settings). - -### Node Settings - -In addition to the cluster level settings specified above, each individual node can also specify configuration to override the cluster level settings and defaults. -If a node does not specify any configuration then it will inherit the cluster level settings. - -* `name`: The name of the node, which should match its `kubernetes.io/hostname` label. -* `config`: Config settings applied to all OSDs on the node unless overridden by `devices`. See the [config settings](#osd-configuration-settings) below. 
-* [storage selection settings](#storage-selection-settings) - -When `useAllNodes` is set to `true`, Rook attempts to make Ceph cluster management as hands-off as -possible while still maintaining reasonable data safety. If a usable node comes online, Rook will -begin to use it automatically. To maintain a balance between hands-off usability and data safety, -Nodes are removed from Ceph as OSD hosts only (1) if the node is deleted from Kubernetes itself or -(2) if the node has its taints or affinities modified in such a way that the node is no longer -usable by Rook. Any changes to taints or affinities, intentional or unintentional, may affect the -data reliability of the Ceph cluster. In order to help protect against this somewhat, deletion of -nodes by taint or affinity modifications must be "confirmed" by deleting the Rook-Ceph operator pod -and allowing the operator deployment to restart the pod. - -For production clusters, we recommend that `useAllNodes` is set to `false` to prevent the Ceph -cluster from suffering reduced data reliability unintentionally due to a user mistake. When -`useAllNodes` is set to `false`, Rook relies on the user to be explicit about when nodes are added -to or removed from the Ceph cluster. Nodes are only added to the Ceph cluster if the node is added -to the Ceph cluster resource. Similarly, nodes are only removed if the node is removed from the Ceph -cluster resource. - -#### Node Updates - -Nodes can be added and removed over time by updating the Cluster CRD, for example with `kubectl -n rook-ceph edit cephcluster rook-ceph`. -This will bring up your default text editor and allow you to add and remove storage nodes from the cluster. -This feature is only available when `useAllNodes` has been set to `false`. - -### Storage Selection Settings - -Below are the settings for host-based cluster. This type of cluster can specify devices for OSDs, both at the cluster and individual node level, for selecting which storage resources will be included in the cluster. - -* `useAllDevices`: `true` or `false`, indicating whether all devices found on nodes in the cluster should be automatically consumed by OSDs. **Not recommended** unless you have a very controlled environment where you will not risk formatting of devices with existing data. When `true`, all devices/partitions will be used. Is overridden by `deviceFilter` if specified. -* `deviceFilter`: A regular expression for short kernel names of devices (e.g. `sda`) that allows selection of devices to be consumed by OSDs. If individual devices have been specified for a node then this filter will be ignored. This field uses [golang regular expression syntax](https://golang.org/pkg/regexp/syntax/). For example: - * `sdb`: Only selects the `sdb` device if found - * `^sd.`: Selects all devices starting with `sd` - * `^sd[a-d]`: Selects devices starting with `sda`, `sdb`, `sdc`, and `sdd` if found - * `^s`: Selects all devices that start with `s` - * `^[^r]`: Selects all devices that do *not* start with `r` -* `devicePathFilter`: A regular expression for device paths (e.g. `/dev/disk/by-path/pci-0:1:2:3-scsi-1`) that allows selection of devices to be consumed by OSDs. If individual devices or `deviceFilter` have been specified for a node then this filter will be ignored. This field uses [golang regular expression syntax](https://golang.org/pkg/regexp/syntax/). 
For example: - * `^/dev/sd.`: Selects all devices starting with `sd` - * `^/dev/disk/by-path/pci-.*`: Selects all devices which are connected to PCI bus -* `devices`: A list of individual device names belonging to this node to include in the storage cluster. - * `name`: The name of the device (e.g., `sda`), or full udev path (e.g. `/dev/disk/by-id/ata-ST4000DM004-XXXX` - this will not change after reboots). - * `config`: Device-specific config settings. See the [config settings](#osd-configuration-settings) below - -Host-based cluster only supports raw device and partition. Be sure to see the -[Ceph quickstart doc prerequisites](ceph-quickstart.md#prerequisites) for additional considerations. - -Below are the settings for a PVC-based cluster. - -* `storageClassDeviceSets`: Explained in [Storage Class Device Sets](#storage-class-device-sets) - -### Storage Class Device Sets - -The following are the settings for Storage Class Device Sets which can be configured to create OSDs that are backed by block mode PVs. - -* `name`: A name for the set. -* `count`: The number of devices in the set. -* `resources`: The CPU and RAM requests/limits for the devices. (Optional) -* `placement`: The placement criteria for the devices. (Optional) Default is no placement criteria. - - The syntax is the same as for [other placement configuration](#placement-configuration-settings). It supports `nodeAffinity`, `podAffinity`, `podAntiAffinity` and `tolerations` keys. - - It is recommended to configure the placement such that the OSDs will be as evenly spread across nodes as possible. At a minimum, anti-affinity should be added so at least one OSD will be placed on each available nodes. - - However, if there are more OSDs than nodes, this anti-affinity will not be effective. Another placement scheme to consider is to add labels to the nodes in such a way that the OSDs can be grouped on those nodes, create multiple storageClassDeviceSets, and add node affinity to each of the device sets that will place the OSDs in those sets of nodes. - - Rook will automatically add required nodeAffinity to the OSD daemons to match the topology labels that are found - on the nodes where the OSD prepare jobs ran. To ensure data durability, the OSDs are required to run in the same - topology that the Ceph CRUSH map expects. For example, if the nodes are labeled with rack topology labels, the - OSDs will be constrained to a certain rack. Without the topology labels, Rook will not constrain the OSDs beyond - what is required by the PVs, for example to run in the zone where provisioned. See the [OSD Topology](#osd-topology) - section for the related labels. - -* `preparePlacement`: The placement criteria for the preparation of the OSD devices. Creating OSDs is a two-step process and the prepare job may require different placement than the OSD daemons. If the `preparePlacement` is not specified, the `placement` will instead be applied for consistent placement for the OSD prepare jobs and OSD deployments. The `preparePlacement` is only useful for `portable` OSDs in the device sets. OSDs that are not portable will be tied to the host where the OSD prepare job initially runs. - * For example, provisioning may require topology spread constraints across zones, but the OSD daemons may require constraints across hosts within the zones. -* `portable`: If `true`, the OSDs will be allowed to move between nodes during failover. This requires a storage class that supports portability (e.g. `aws-ebs`, but not the local storage provisioner). 
If `false`, the OSDs will be assigned to a node permanently. Rook will configure Ceph's CRUSH map to support the portability. -* `tuneDeviceClass`: For example, Ceph cannot detect AWS volumes as HDDs from the storage class "gp2", so you can improve Ceph performance by setting this to true. -* `tuneFastDeviceClass`: For example, Ceph cannot detect Azure disks as SSDs from the storage class "managed-premium", so you can improve Ceph performance by setting this to true.. -* `volumeClaimTemplates`: A list of PVC templates to use for provisioning the underlying storage devices. - * `resources.requests.storage`: The desired capacity for the underlying storage devices. - * `storageClassName`: The StorageClass to provision PVCs from. Default would be to use the cluster-default StorageClass. This StorageClass should provide a raw block device, multipath device, or logical volume. Other types are not supported. If you want to use logical volume, please see [known issue of OSD on LV-backed PVC](ceph-common-issues.md#lvm-metadata-can-be-corrupted-with-osd-on-lv-backed-pvc) - * `volumeMode`: The volume mode to be set for the PVC. Which should be Block - * `accessModes`: The access mode for the PVC to be bound by OSD. -* `schedulerName`: Scheduler name for OSD pod placement. (Optional) -* `encrypted`: whether to encrypt all the OSDs in a given storageClassDeviceSet - -### OSD Configuration Settings - -The following storage selection settings are specific to Ceph and do not apply to other backends. All variables are key-value pairs represented as strings. - -* `metadataDevice`: Name of a device to use for the metadata of OSDs on each node. Performance can be improved by using a low latency device (such as SSD or NVMe) as the metadata device, while other spinning platter (HDD) devices on a node are used to store data. Provisioning will fail if the user specifies a `metadataDevice` but that device is not used as a metadata device by Ceph. Notably, `ceph-volume` will not use a device of the same device class (HDD, SSD, NVMe) as OSD devices for metadata, resulting in this failure. -* `databaseSizeMB`: The size in MB of a bluestore database. Include quotes around the size. -* `walSizeMB`: The size in MB of a bluestore write ahead log (WAL). Include quotes around the size. -* `deviceClass`: The [CRUSH device class](https://ceph.io/community/new-luminous-crush-device-classes/) to use for this selection of storage devices. (By default, if a device's class has not already been set, OSDs will automatically set a device's class to either `hdd`, `ssd`, or `nvme` based on the hardware properties exposed by the Linux kernel.) These storage classes can then be used to select the devices backing a storage pool by specifying them as the value of [the pool spec's `deviceClass` field](ceph-pool-crd.md#spec). -* `initialWeight`: The initial OSD weight in TiB units. By default, this value is derived from OSD's capacity. -* `primaryAffinity`: The [primary-affinity](https://docs.ceph.com/en/latest/rados/operations/crush-map/#primary-affinity) value of an OSD, within range `[0, 1]` (default: `1`). -* `osdsPerDevice`**: The number of OSDs to create on each device. High performance devices such as NVMe can handle running multiple OSDs. If desired, this can be overridden for each node and each device. -* `encryptedDevice`**: Encrypt OSD volumes using dmcrypt ("true" or "false"). By default this option is disabled. 
See [encryption](http://docs.ceph.com/docs/master/ceph-volume/lvm/encryption/) for more information on encryption in Ceph. -* `crushRoot`: The value of the `root` CRUSH map label. The default is `default`. Generally, you should not need to change this. However, if any of your topology labels may have the value `default`, you need to change `crushRoot` to avoid conflicts, since CRUSH map values need to be unique. - -### Annotations and Labels - -Annotations and Labels can be specified so that the Rook components will have those annotations / labels added to them. - -You can set annotations / labels for Rook components for the list of key value pairs: - -* `all`: Set annotations / labels for all components -* `mgr`: Set annotations / labels for MGRs -* `mon`: Set annotations / labels for mons -* `osd`: Set annotations / labels for OSDs -* `prepareosd`: Set annotations / labels for OSD Prepare Jobs -When other keys are set, `all` will be merged together with the specific component. - -### Placement Configuration Settings - -Placement configuration for the cluster services. It includes the following keys: `mgr`, `mon`, `arbiter`, `osd`, `cleanup`, and `all`. -Each service will have its placement configuration generated by merging the generic configuration under `all` with the most specific one (which will override any attributes). - -In stretch clusters, if the `arbiter` placement is specified, that placement will only be applied to the arbiter. -Neither will the `arbiter` placement be merged with the `all` placement to allow the arbiter to be fully independent of other daemon placement. -The remaining mons will still use the `mon` and/or `all` sections. - - -**NOTE:** Placement of OSD pods is controlled using the [Storage Class Device Set](#storage-class-device-sets), not the general `placement` configuration. - -A Placement configuration is specified (according to the kubernetes PodSpec) as: - -* `nodeAffinity`: kubernetes [NodeAffinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature) -* `podAffinity`: kubernetes [PodAffinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) -* `podAntiAffinity`: kubernetes [PodAntiAffinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) -* `tolerations`: list of kubernetes [Toleration](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) -* `topologySpreadConstraints`: kubernetes [TopologySpreadConstraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) - -If you use `labelSelector` for `osd` pods, you must write two rules both for `rook-ceph-osd` and `rook-ceph-osd-prepare` like [the example configuration](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml#L68). It comes from the design that there are these two pods for an OSD. For more detail, see the [osd design doc](https://github.com/rook/rook/blob/master/design/ceph/dedicated-osd-pod.md) and [the related issue](https://github.com/rook/rook/issues/4582). - -The Rook Ceph operator creates a Job called `rook-ceph-detect-version` to detect the full Ceph version used by the given `cephVersion.image`. The placement from the `mon` section is used for the Job except for the `PodAntiAffinity` field. 
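As an illustration of the `topologySpreadConstraints` key listed above, here is a hedged sketch that would spread mons across zones; the `app: rook-ceph-mon` label selector and the zone topology key are assumptions that should be checked against your environment:

```yaml
placement:
  mon:
    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: rook-ceph-mon
```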
- -### Cluster-wide Resources Configuration Settings - -Resources should be specified so that the Rook components are handled after [Kubernetes Pod Quality of Service classes](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/). -This allows to keep Rook components running when for example a node runs out of memory and the Rook components are not killed depending on their Quality of Service class. - -You can set resource requests/limits for Rook components through the [Resource Requirements/Limits](#resource-requirementslimits) structure in the following keys: - -* `mon`: Set resource requests/limits for mons -* `osd`: Set resource requests/limits for OSDs. - This key applies for all OSDs regardless of their device classes. In case of need to apply resource requests/limits for OSDs with particular device class use specific osd keys below. If the memory resource is declared Rook will automatically set the OSD configuration `osd_memory_target` to the same value. This aims to ensure that the actual OSD memory consumption is consistent with the OSD pods' resource declaration. -* `osd-`: Set resource requests/limits for OSDs on a specific device class. Rook will automatically detect `hdd`, - `ssd`, or `nvme` device classes. Custom device classes can also be set. -* `mgr`: Set resource requests/limits for MGRs -* `mgr-sidecar`: Set resource requests/limits for the MGR sidecar, which is only created when `mgr.count: 2`. - The sidecar requires very few resources since it only executes every 15 seconds to query Ceph for the active - mgr and update the mgr services if the active mgr changed. -* `prepareosd`: Set resource requests/limits for OSD prepare job -* `crashcollector`: Set resource requests/limits for crash. This pod runs wherever there is a Ceph pod running. -It scrapes for Ceph daemon core dumps and sends them to the Ceph manager crash module so that core dumps are centralized and can be easily listed/accessed. -You can read more about the [Ceph Crash module](https://docs.ceph.com/docs/master/mgr/crash/). -* `logcollector`: Set resource requests/limits for the log collector. When enabled, this container runs as side-car to each Ceph daemons. -* `cleanup`: Set resource requests/limits for cleanup job, responsible for wiping cluster's data after uninstall - -In order to provide the best possible experience running Ceph in containers, Rook internally recommends minimum memory limits if resource limits are passed. -If a user configures a limit or request value that is too low, Rook will still run the pod(s) and print a warning to the operator log. - -* `mon`: 1024MB -* `mgr`: 512MB -* `osd`: 2048MB -* `prepareosd`: 50MB -* `crashcollector`: 60MB -* `mgr-sidecar`: 100MB limit, 40MB requests - -> **HINT** The resources for MDS daemons are not configured in the Cluster. Refer to the [Ceph Filesystem CRD](ceph-filesystem-crd.md) instead. - -### Resource Requirements/Limits - -For more information on resource requests/limits see the official Kubernetes documentation: [Kubernetes - Managing Compute Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) - -* `requests`: Requests for cpu or memory. - * `cpu`: Request for CPU (example: one CPU core `1`, 50% of one CPU core `500m`). - * `memory`: Limit for Memory (example: one gigabyte of memory `1Gi`, half a gigabyte of memory `512Mi`). -* `limits`: Limits for cpu or memory. 
- * `cpu`: Limit for CPU (example: one CPU core `1`, 50% of one CPU core `500m`). - * `memory`: Limit for Memory (example: one gigabyte of memory `1Gi`, half a gigabyte of memory `512Mi`). - -### Priority Class Names Configuration Settings - -Priority class names can be specified so that the Rook components will have those priority class names added to them. - -You can set priority class names for Rook components for the list of key value pairs: - -* `all`: Set priority class names for MGRs, Mons, OSDs. -* `mgr`: Set priority class names for MGRs. -* `mon`: Set priority class names for Mons. -* `osd`: Set priority class names for OSDs. - -The specific component keys will act as overrides to `all`. - -### Health settings - -Rook-Ceph will monitor the state of the CephCluster on various components by default. -The following CRD settings are available: - -* `healthCheck`: main ceph cluster health monitoring section - -Currently three health checks are implemented: - -* `mon`: health check on the ceph monitors, basically check whether monitors are members of the quorum. If after a certain timeout a given monitor has not joined the quorum back it will be failed over and replace by a new monitor. -* `osd`: health check on the ceph osds -* `status`: ceph health status check, periodically check the Ceph health state and reflects it in the CephCluster CR status field. - -The liveness probe of each daemon can also be controlled via `livenessProbe`, the setting is valid for `mon`, `mgr` and `osd`. -Here is a complete example for both `daemonHealth` and `livenessProbe`: - -```yaml -healthCheck: - daemonHealth: - mon: - disabled: false - interval: 45s - timeout: 600s - osd: - disabled: false - interval: 60s - status: - disabled: false - livenessProbe: - mon: - disabled: false - mgr: - disabled: false - osd: - disabled: false -``` - -The probe itself can also be overridden, refer to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command). - -For example, you could change the `mgr` probe by applying: - -```yaml -healthCheck: - livenessProbe: - mgr: - disabled: false - probe: - httpGet: - path: / - port: 9283 - initialDelaySeconds: 3 - periodSeconds: 3 -``` - -Changing the liveness probe is an advanced operation and should rarely be necessary. If you want to change these settings then modify the desired settings. - -## Status - -The operator is regularly configuring and checking the health of the cluster. The results of the configuration -and health checks can be seen in the `status` section of the CephCluster CR. - -``` -kubectl -n rook-ceph get CephCluster -o yaml -``` - -```yaml - ... - status: - ceph: - health: HEALTH_OK - lastChecked: "2021-03-02T21:22:11Z" - capacity: - bytesAvailable: 22530293760 - bytesTotal: 25757220864 - bytesUsed: 3226927104 - lastUpdated: "2021-03-02T21:22:11Z" - message: Cluster created successfully - phase: Ready - state: Created - storage: - deviceClasses: - - name: hdd - version: - image: quay.io/ceph/ceph:v16.2.5 - version: 16.2.5-0 - conditions: - - lastHeartbeatTime: "2021-03-02T21:22:11Z" - lastTransitionTime: "2021-03-02T21:21:09Z" - message: Cluster created successfully - reason: ClusterCreated - status: "True" - type: Ready -``` - -### Ceph Status - -Ceph is constantly monitoring the health of the data plane and reporting back if there are -any warnings or errors. If everything is healthy from Ceph's perspective, you will see -`HEALTH_OK`. 
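To quickly query just that health field from the CR status, a one-liner such as the following can be used (the cluster name `rook-ceph` matches the samples in this document):

```console
kubectl -n rook-ceph get cephcluster rook-ceph -o jsonpath='{.status.ceph.health}'
```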
- -If Ceph reports any warnings or errors, the details will be printed to the status. -If further troubleshooting is needed to resolve these issues, the toolbox will likely -be needed where you can run `ceph` commands to find more details. - -The `capacity` of the cluster is reported, including bytes available, total, and used. -The available space will be less that you may expect due to overhead in the OSDs. - -### Conditions - -The `conditions` represent the status of the Rook operator. -- If the cluster is fully configured and the operator is stable, the - `Ready` condition is raised with `ClusterCreated` reason and no other conditions. The cluster - will remain in the `Ready` condition after the first successful configuration since it - is expected the storage is consumable from this point on. If there are issues preventing - the storage layer from working, they are expected to show as Ceph health errors. -- If the cluster is externally connected successfully, the `Ready` condition will have the reason `ClusterConnected`. -- If the operator is currently being configured or the operator is checking for update, - there will be a `Progressing` condition. -- If there was a failure, the condition(s) status will be `false` and the `message` will - give a summary of the error. See the operator log for more details. - -### Other Status - -There are several other properties for the overall status including: -- `message`, `phase`, and `state`: A summary of the overall current state of the cluster, which - is somewhat duplicated from the conditions for backward compatibility. -- `storage.deviceClasses`: The names of the types of storage devices that Ceph discovered - in the cluster. These types will be `ssd` or `hdd` unless they have been overridden - with the `crushDeviceClass` in the `storageClassDeviceSets`. -- `version`: The version of the Ceph image currently deployed. - -## Samples - -Here are several samples for configuring Ceph clusters. Each of the samples must also include the namespace and corresponding access granted for management by the Ceph operator. See the [common cluster resources](#common-cluster-resources) below. - -### Storage configuration: All devices - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - dashboard: - enabled: true - # cluster level storage configuration and selection - storage: - useAllNodes: true - useAllDevices: true - deviceFilter: - config: - metadataDevice: - databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger) - journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger) - osdsPerDevice: "1" -``` - -### Storage Configuration: Specific devices - -Individual nodes and their config can be specified so that only the named nodes below will be used as storage resources. -Each node's 'name' field should match their 'kubernetes.io/hostname' label. 
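To look up the exact value of that label for your nodes before filling in the `name` fields, you can list the node labels:

```console
kubectl get nodes --show-labels
```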
- -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - dashboard: - enabled: true - # cluster level storage configuration and selection - storage: - useAllNodes: false - useAllDevices: false - deviceFilter: - config: - metadataDevice: - databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger) - nodes: - - name: "172.17.4.201" - devices: # specific devices to use for storage can be specified for each node - - name: "sdb" # Whole storage device - - name: "sdc1" # One specific partition. Should not have a file system on it. - - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # both device name and explicit udev links are supported - config: # configuration can be specified at the node level which overrides the cluster level config - - name: "172.17.4.301" - deviceFilter: "^sd." -``` - -### Node Affinity - -To control where various services will be scheduled by kubernetes, use the placement configuration sections below. -The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and -tolerate taints with a key of 'storage-node'. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - # enable the ceph dashboard for viewing cluster status - dashboard: - enabled: true - placement: - all: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: role - operator: In - values: - - storage-node - tolerations: - - key: storage-node - operator: Exists - mgr: - nodeAffinity: - tolerations: - mon: - nodeAffinity: - tolerations: - osd: - nodeAffinity: - tolerations: -``` - -### Resource Requests/Limits - -To control how many resources the Rook components can request/use, you can set requests and limits in Kubernetes for them. -You can override these requests/limits for OSDs per node when using `useAllNodes: false` in the `node` item in the `nodes` list. - -> **WARNING**: Before setting resource requests/limits, please take a look at the Ceph documentation for recommendations for each component: [Ceph - Hardware Recommendations](http://docs.ceph.com/docs/master/start/hardware-recommendations/). - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - # enable the ceph dashboard for viewing cluster status - dashboard: - enabled: true - # cluster level resource requests/limits configuration - resources: - storage: - useAllNodes: false - nodes: - - name: "172.17.4.201" - resources: - limits: - cpu: "2" - memory: "4096Mi" - requests: - cpu: "2" - memory: "4096Mi" -``` - -### OSD Topology - -The topology of the cluster is important in production environments where you want your data spread across failure domains. The topology -can be controlled by adding labels to the nodes. When the labels are found on a node at first OSD deployment, Rook will add them to -the desired level in the [CRUSH map](https://docs.ceph.com/en/latest/rados/operations/crush-map/). 
- -The complete list of labels in hierarchy order from highest to lowest is: - -```text -topology.kubernetes.io/region -topology.kubernetes.io/zone -topology.rook.io/datacenter -topology.rook.io/room -topology.rook.io/pod -topology.rook.io/pdu -topology.rook.io/row -topology.rook.io/rack -topology.rook.io/chassis -``` - -For example, if the following labels were added to a node: - -```console -kubectl label node mynode topology.kubernetes.io/zone=zone1 -kubectl label node mynode topology.rook.io/rack=zone1-rack1 -``` - -> For versions previous to K8s 1.17, use the topology key: failure-domain.beta.kubernetes.io/zone or region - -These labels would result in the following hierarchy for OSDs on that node (this command can be run in the Rook toolbox): - -```console -ceph osd tree -``` - ->``` ->ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF ->-1 0.01358 root default ->-5 0.01358 zone zone1 ->-4 0.01358 rack rack1 ->-3 0.01358 host mynode ->0 hdd 0.00679 osd.0 up 1.00000 1.00000 ->1 hdd 0.00679 osd.1 up 1.00000 1.00000 ->``` - -Ceph requires unique names at every level in the hierarchy (CRUSH map). For example, you cannot have two racks -with the same name that are in different zones. Racks in different zones must be named uniquely. - -Note that the `host` is added automatically to the hierarchy by Rook. The host cannot be specified with a topology label. -All topology labels are optional. - -> **HINT** When setting the node labels prior to `CephCluster` creation, these settings take immediate effect. However, applying this to an already deployed `CephCluster` requires removing each node from the cluster first and then re-adding it with new configuration to take effect. Do this node by node to keep your data safe! Check the result with `ceph osd tree` from the [Rook Toolbox](ceph-toolbox.md). The OSD tree should display the hierarchy for the nodes that already have been re-added. - -To utilize the `failureDomain` based on the node labels, specify the corresponding option in the [CephBlockPool](ceph-pool-crd.md) - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - failureDomain: rack # this matches the topology labels on nodes - replicated: - size: 3 -``` - -This configuration will split the replication of volumes across unique -racks in the data center setup. - -### Using PVC storage for monitors - -In the CRD specification below three monitors are created each using a 10Gi PVC -created by Rook using the `local-storage` storage class. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - volumeClaimTemplate: - spec: - storageClassName: local-storage - resources: - requests: - storage: 10Gi - dashboard: - enabled: true - storage: - useAllNodes: true - useAllDevices: true - deviceFilter: - config: - metadataDevice: - databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger) - journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger) - osdsPerDevice: "1" -``` - -### Using StorageClassDeviceSets - -In the CRD specification below, 3 OSDs (having specific placement and resource values) and 3 mons with each using a 10Gi PVC, are created by Rook using the `local-storage` storage class. 
- -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - volumeClaimTemplate: - spec: - storageClassName: local-storage - resources: - requests: - storage: 10Gi - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - allowUnsupported: false - dashboard: - enabled: true - network: - hostNetwork: false - storage: - storageClassDeviceSets: - - name: set1 - count: 3 - portable: false - resources: - limits: - cpu: "500m" - memory: "4Gi" - requests: - cpu: "500m" - memory: "4Gi" - placement: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "rook.io/cluster" - operator: In - values: - - cluster1 - topologyKey: "topology.kubernetes.io/zone" - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - storageClassName: local-storage - volumeMode: Block - accessModes: - - ReadWriteOnce -``` - -### Dedicated metadata and wal device for OSD on PVC - -In the simplest case, Ceph OSD BlueStore consumes a single (primary) storage device. -BlueStore is the engine used by the OSD to store data. - -The storage device is normally used as a whole, occupying the full device that is managed directly by BlueStore. -It is also possible to deploy BlueStore across additional devices such as a DB device. -This device can be used for storing BlueStore’s internal metadata. -BlueStore (or rather, the embedded RocksDB) will put as much metadata as it can on the DB device to improve performance. -If the DB device fills up, metadata will spill back onto the primary device (where it would have been otherwise). -Again, it is only helpful to provision a DB device if it is faster than the primary device. - -You can have multiple `volumeClaimTemplates` where each might either represent a device or a metadata device. -So just taking the `storage` section this will give something like: - -```yaml - storage: - storageClassDeviceSets: - - name: set1 - count: 3 - portable: false - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2) - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - - metadata: - name: metadata - spec: - resources: - requests: - # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing - storage: 5Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, io1) - storageClassName: io1 - volumeMode: Block - accessModes: - - ReadWriteOnce -``` - -> **NOTE**: Note that Rook only supports three naming convention for a given template: - -* "data": represents the main OSD block device, where your data is being stored. -* "metadata": represents the metadata (including block.db and block.wal) device used to store the Ceph Bluestore database for an OSD. -* "wal": represents the block.wal device used to store the Ceph Bluestore database for an OSD. If this device is set, "metadata" device will refer specifically to block.db device. -It is recommended to use a faster storage class for the metadata or wal device, with a slower device for the data. -Otherwise, having a separate metadata device will not improve the performance. 
- -The bluestore partition has the following reference combinations supported by the ceph-volume utility: - -* A single "data" device. - - ```yaml - storage: - storageClassDeviceSets: - - name: set1 - count: 3 - portable: false - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2) - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - ``` - -* A "data" device and a "metadata" device. - - ```yaml - storage: - storageClassDeviceSets: - - name: set1 - count: 3 - portable: false - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2) - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - - metadata: - name: metadata - spec: - resources: - requests: - # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing - storage: 5Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, io1) - storageClassName: io1 - volumeMode: Block - accessModes: - - ReadWriteOnce - ``` - -* A "data" device and a "wal" device. -A WAL device can be used for BlueStore’s internal journal or write-ahead log (block.wal), it is only useful to use a WAL device if the device is faster than the primary device (data device). -There is no separate "metadata" device in this case, the data of main OSD block and block.db located in "data" device. - - ```yaml - storage: - storageClassDeviceSets: - - name: set1 - count: 3 - portable: false - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2) - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - - metadata: - name: wal - spec: - resources: - requests: - # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing - storage: 5Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, io1) - storageClassName: io1 - volumeMode: Block - accessModes: - - ReadWriteOnce - ``` - -* A "data" device, a "metadata" device and a "wal" device. - - ```yaml - storage: - storageClassDeviceSets: - - name: set1 - count: 3 - portable: false - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2) - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - - metadata: - name: metadata - spec: - resources: - requests: - # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing - storage: 5Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, io1) - storageClassName: io1 - volumeMode: Block - accessModes: - - ReadWriteOnce - - metadata: - name: wal - spec: - resources: - requests: - # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing - storage: 5Gi - # IMPORTANT: Change the storage class depending on your environment (e.g. 
local-storage, io1)
-      storageClassName: io1
-      volumeMode: Block
-      accessModes:
-        - ReadWriteOnce
-  ```
-
-To determine the size of the metadata block follow the [official Ceph sizing guide](https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing).
-
-With the present configuration, each OSD will have its main block allocated a 10GB device as well as a 5GB device to act as a bluestore database.
-
-### External cluster
-
-**The minimum supported Ceph version for the External Cluster is Luminous 12.2.x.**
-
-The features available from the external cluster will vary depending on the version of Ceph. The following table shows the minimum version of Ceph for some of the features:
-
-| FEATURE                                      | CEPH VERSION |
-| -------------------------------------------- | ------------ |
-| Dynamic provisioning RBD                     | 12.2.X       |
-| Configure extra CRDs (object, file, nfs)[^1] | 13.2.3       |
-| Dynamic provisioning CephFS                  | 14.2.3       |
-
-[^1]: Configure an object store, shared filesystem, or NFS resources in the local cluster to connect to the external Ceph cluster
-
-#### Pre-requisites
-
-In order to configure an external Ceph cluster with Rook, we need to inject some information so that Rook can connect to that cluster.
-You can use the `cluster/examples/kubernetes/ceph/import-external-cluster.sh` script to achieve that.
-The script will look for the following populated environment variables:
-
-* `NAMESPACE`: the namespace where the configmap and secrets should be injected
-* `ROOK_EXTERNAL_FSID`: the fsid of the external Ceph cluster; it can be retrieved via the `ceph fsid` command
-* `ROOK_EXTERNAL_CEPH_MON_DATA`: a comma-separated list of running monitor IP addresses along with their ports, e.g: `a=172.17.0.4:3300,b=172.17.0.5:3300,c=172.17.0.6:3300`. You don't need to specify all the monitors; you can simply pass one and the Operator will discover the rest. The name of the monitor is the name that appears in the `ceph status` output.
-
-Now, we need to give Rook a key to connect to the cluster in order to perform various operations such as cluster health checks, CSI key management, etc.
-It is recommended to generate keys with minimal access so the admin key does not need to be used by the external cluster.
-In this case, the admin key is only needed to generate the keys that will be used by the external cluster.
-But if the admin key is to be used by the external cluster, set the following variable:
-
-* `ROOK_EXTERNAL_ADMIN_SECRET`: **OPTIONAL:** the external Ceph cluster admin secret key; it can be retrieved via the `ceph auth get-key client.admin` command.
-
-> **WARNING**: If you plan to create CRs (pool, rgw, mds, nfs) in the external cluster, you **MUST** inject the client.admin keyring as well as inject `cluster-external-management.yaml`.
-
-**Example**:
-
-```console
-export NAMESPACE=rook-ceph-external
-export ROOK_EXTERNAL_FSID=3240b4aa-ddbc-42ee-98ba-4ea7b2a61514
-export ROOK_EXTERNAL_CEPH_MON_DATA=a=172.17.0.4:3300
-export ROOK_EXTERNAL_ADMIN_SECRET=AQC6Ylxdja+NDBAAB7qy9MEAr4VLLq4dCIvxtg==
-```
-
-If the Ceph admin key is not provided, the following script needs to be executed on a machine that can connect to the Ceph cluster using the Ceph admin key.
-On that machine, run:
-
-```sh
-. cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh
-```
-
-The script will source all the necessary environment variables for you. It assumes the namespace name is `rook-ceph-external`.
-This can be changed by running the script as follows (assuming the namespace name is `foo` this time):
-
-```sh
-ns=foo . cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh
-```
-
-When done, you can execute `import-external-cluster.sh` to inject them into your Kubernetes cluster.
-
-> **WARNING**: Since only the Ceph admin key can create CRs in the external cluster, please make sure that rgw pools have been prepared. You can get existing pools by running `ceph osd pool ls`.
-
-**Example**:
-
-```console
-ceph osd pool ls
-```
->```
->my-store.rgw.control
->my-store.rgw.meta
->my-store.rgw.log
->my-store.rgw.buckets.index
->my-store.rgw.buckets.non-ec
->my-store.rgw.buckets.data
->```
-
-In this example, you can simply export RGW_POOL_PREFIX before executing the script like this:
-
-```console
-export RGW_POOL_PREFIX=my-store
-```
-
-The script will automatically create users and keys with the lowest possible privileges and populate the necessary environment variables for `cluster/examples/kubernetes/ceph/import-external-cluster.sh` to work correctly.
-
-Finally, you can simply execute the script like this from a machine that has access to your Kubernetes cluster:
-
-```console
-bash cluster/examples/kubernetes/ceph/import-external-cluster.sh
-```
-
-#### CephCluster example (consumer)
-
-Assuming the above section has successfully completed, here is a CR example:
-
-```yaml
-apiVersion: ceph.rook.io/v1
-kind: CephCluster
-metadata:
-  name: rook-ceph-external
-  namespace: rook-ceph-external
-spec:
-  external:
-    enable: true
-  crashCollector:
-    disable: true
-  # optionally, the ceph-mgr IP address can be passed to gather metrics from the prometheus exporter
-  #monitoring:
-    #enabled: true
-    #rulesNamespace: rook-ceph
-    #externalMgrEndpoints:
-    #- ip: 192.168.39.182
-    #externalMgrPrometheusPort: 9283
-```
-
-Choose the namespace carefully: if you have an existing cluster managed by Rook, you have likely already injected `common.yaml`.
-Additionally, you now need to inject `common-external.yaml` too.
-
-You can now create it like this:
-
-```console
-kubectl create -f cluster/examples/kubernetes/ceph/cluster-external.yaml
-```
-
-If the previous section has not been completed, the Rook Operator will still acknowledge the CR creation but will wait forever to receive connection information.
-
-> **WARNING**: If no cluster is managed by the current Rook Operator, you need to inject `common.yaml`, then modify `cluster-external.yaml` and specify `rook-ceph` as `namespace`.
-
-If this is successful you will see the CephCluster status as connected.
-
-```console
-kubectl get CephCluster -n rook-ceph-external
-```
-
->```
->NAME                 DATADIRHOSTPATH   MONCOUNT   AGE    STATE       HEALTH
->rook-ceph-external   /var/lib/rook                162m   Connected   HEALTH_OK
->```
-
-Before you create a StorageClass with this cluster you will need to create a Pool in your external Ceph Cluster.
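-
-As a sketch, a small replicated pool could be created from any host with admin access to the external cluster; the pool name and PG count below are only examples (the next section uses a pool named `replicated_2g`):
-
-```console
-ceph osd pool create replicated_2g 32 replicated
-rbd pool init replicated_2g
-```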
- -#### Example StorageClass based on external Ceph Pool - -In Ceph Cluster let us list the pools available: - -```console -rados df -``` - ->``` ->POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS RD WR_OPS WR USED COMPR UNDER COMPR ->replicated_2g 0 B 0 0 0 0 0 0 0 0 B 0 0 B 0 B 0 B -> ``` - -Here is an example StorageClass configuration that uses the `replicated_2g` pool from the external cluster: - -```console -cat << EOF | kubectl apply -f - -``` - ->``` ->apiVersion: storage.k8s.io/v1 ->kind: StorageClass ->metadata: -> name: rook-ceph-block-ext -># Change "rook-ceph" provisioner prefix to match the operator namespace if needed ->provisioner: rook-ceph.rbd.csi.ceph.com ->parameters: -> # clusterID is the namespace where the rook cluster is running -> clusterID: rook-ceph-external -> # Ceph pool into which the RBD image shall be created -> pool: replicated_2g -> -> # RBD image format. Defaults to "2". -> imageFormat: "2" -> -> # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. -> imageFeatures: layering -> -> # The secrets contain Ceph admin credentials. -> csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner -> csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph-external -> csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner -> csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph-external -> csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node -> csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph-external -> -> # Specify the filesystem type of the volume. If not specified, csi-provisioner -> # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock -> # in hyperconverged settings where the volume is mounted on the same node as the osds. -> csi.storage.k8s.io/fstype: ext4 -> -># Delete the rbd volume when a PVC is deleted ->reclaimPolicy: Delete ->allowVolumeExpansion: true ->EOF ->``` - -You can now create a persistent volume based on this StorageClass. - -#### CephCluster example (management) - -The following CephCluster CR represents a cluster that will perform management tasks on the external cluster. -It will not only act as a consumer but will also allow the deployment of other CRDs such as CephFilesystem or CephObjectStore. -As mentioned above, you would need to inject the admin keyring for that. - -The corresponding YAML example: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph-external - namespace: rook-ceph-external -spec: - external: - enable: true - dataDirHostPath: /var/lib/rook - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 # Should match external cluster version -``` - -### Security - -Rook has the ability to encrypt OSDs of clusters running on PVC via the flag (`encrypted: true`) in your `storageClassDeviceSets` [template](#pvc-based-cluster). -By default, the Key Encryption Keys (also known as Data Encryption Keys) are stored in a Kubernetes Secret. - -However, if a Key Management System exists Rook is capable of using it. HashiCorp Vault is the only KMS currently supported by Rook. -Please refer to the next section. - -The `security` section contains settings related to encryption of the cluster. 
-
-* `security`:
-  * `kms`: Key Management System settings
-    * `connectionDetails`: the list of parameters representing kms connection details
-    * `tokenSecretName`: the name of the Kubernetes Secret containing the kms authentication token
-
-#### Vault KMS
-
-In order for Rook to connect to Vault, you must configure the following in your `CephCluster` template:
-
-```yaml
-security:
-  kms:
-    # name of the k8s config map containing all the kms connection details
-    connectionDetails:
-      KMS_PROVIDER: vault
-      VAULT_ADDR: https://vault.default.svc.cluster.local:8200
-      VAULT_BACKEND_PATH: rook
-      VAULT_SECRET_ENGINE: kv
-    # name of the k8s secret containing the kms authentication token
-    tokenSecretName: rook-vault-token
-```
-
-Note: Rook supports **all** the Vault [environment variables](https://www.vaultproject.io/docs/commands#environment-variables).
-
-The Kubernetes Secret `rook-vault-token` should contain:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: rook-vault-token
-  namespace: rook-ceph
-data:
-  token: # base64 of a token to connect to Vault, for example: cy5GWXpsbzAyY2duVGVoRjhkWG5Bb3EyWjkK
-```
-
-Here is an example of a policy that can be attached to that token:
-
-```hcl
-path "rook/*" {
-  capabilities = ["create", "read", "update", "delete", "list"]
-}
-path "sys/mounts" {
-  capabilities = ["read"]
-}
-```
-
-You can write the policy like so and then create a token:
-
-```console
-vault policy write rook /tmp/rook.hcl
-vault token create -policy=rook
-```
->```
->Key                  Value
->---                  -----
->token                s.FYzlo02cgnTehF8dXnAoq2Z9
->token_accessor       oMo7sAXQKbYtxU4HtO8k3pko
->token_duration       768h
->token_renewable      true
->token_policies       ["default" "rook"]
->identity_policies    []
->policies             ["default" "rook"]
->```
-
-In this example, the backend path named `rook` is used; it must be enabled in Vault with the following:
-
-```console
-vault secrets enable -path=rook kv
-```
-
-If a different path is used, the `VAULT_BACKEND_PATH` key in `connectionDetails` must be changed.
-
-Currently, token-based authentication is the only supported method.
-Rook plans to support the [Vault Kubernetes native authentication](https://www.vaultproject.io/docs/auth/kubernetes) in the future.
-
-##### TLS configuration
-
-This is an advanced but recommended configuration for production deployments. In this case, the `vault-connection-details` will look like:
-
-```yaml
-security:
-  kms:
-    # name of the k8s config map containing all the kms connection details
-    connectionDetails:
-      KMS_PROVIDER: vault
-      VAULT_ADDR: https://vault.default.svc.cluster.local:8200
-      VAULT_CACERT:
-      VAULT_CLIENT_CERT:
-      VAULT_CLIENT_KEY:
-    # name of the k8s secret containing the kms authentication token
-    tokenSecretName: rook-vault-token
-```
-
-Each Secret is expected to contain the following key:
-
-* VAULT_CACERT: `cert`
-* VAULT_CLIENT_CERT: `cert`
-* VAULT_CLIENT_KEY: `key`
-
-For instance, the `VAULT_CACERT` Secret named `vault-tls-ca-certificate` will look like:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: vault-tls-ca-certificate
-  namespace: rook-ceph
-data:
-  cert:
-```
-
-Note: if you are using self-signed certificates (not known/approved by a proper CA) you must pass `VAULT_SKIP_VERIFY: true`.
-Communications will remain encrypted but the validity of the certificate will not be verified.
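-
-As a sketch, the TLS Secrets referenced by `VAULT_CACERT`, `VAULT_CLIENT_CERT` and `VAULT_CLIENT_KEY` could be created as follows; the Secret names and local file names are examples only and must match the values you set in `connectionDetails`:
-
-```console
-kubectl -n rook-ceph create secret generic vault-tls-ca-certificate --from-file=cert=vault.ca
-kubectl -n rook-ceph create secret generic vault-tls-client-certificate --from-file=cert=vault.crt
-kubectl -n rook-ceph create secret generic vault-tls-client-key --from-file=key=vault.key
-```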
-
-### Deleting a CephCluster
-
-During deletion of a CephCluster resource, Rook protects against accidental or premature destruction
-of user data by blocking deletion if there are any other Rook-Ceph Custom Resources that reference
-the CephCluster being deleted. Rook will warn about which other resources are blocking deletion in
-three ways until all blocking resources are deleted:
-1. An event will be registered on the CephCluster resource
-1. A status condition will be added to the CephCluster resource
-1. An error will be added to the Rook-Ceph Operator log
-
-#### Cleanup policy
-
-Rook has the ability to clean up resources and data that were deployed when a CephCluster is removed.
-The policy settings indicate which data should be forcibly deleted and in what way the data should be wiped.
-The `cleanupPolicy` has several fields:
-
-* `confirmation`: Only an empty string and `yes-really-destroy-data` are valid values for this field.
-  If this setting is empty, the cleanupPolicy settings will be ignored and Rook will not clean up any resources during cluster removal.
-  To reinstall the cluster, the admin would then be required to follow the [cleanup guide](ceph-teardown.md) to delete the data on hosts.
-  If this setting is `yes-really-destroy-data`, the operator will automatically delete the data on hosts.
-  Because this cleanup policy is destructive, after the confirmation is set to `yes-really-destroy-data`
-  Rook will stop configuring the cluster as if the cluster is about to be destroyed.
-* `sanitizeDisks`: advanced settings that can be used to delete data on drives.
-  * `method`: indicates if the entire disk should be sanitized or simply ceph's metadata. Possible choices are 'quick' (default) or 'complete'
-  * `dataSource`: indicates where to get the bytes used to wipe the disk. Possible choices are 'zero' (default) or 'random'.
-  Using random sources will consume entropy from the system and will take much more time than the zero source
-  * `iteration`: overwrite N times instead of the default (1). Takes an integer value
-* `allowUninstallWithVolumes`: If set to true, then the cephCluster deletion doesn't wait for the PVCs to be deleted. Default is false.
-
-To automate activation of the cleanup, you can use the following command. **WARNING: DATA WILL BE PERMANENTLY DELETED**:
-
-```console
-kubectl -n rook-ceph patch cephcluster rook-ceph --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}'
-```
-
-Nothing will happen until the deletion of the CR is requested, so this can still be reverted.
-However, all new configuration by the operator will be blocked with this cleanup policy enabled.
-
-Rook waits for the deletion of PVs provisioned using the cephCluster before proceeding to delete the
-cephCluster. To force deletion of the cephCluster without waiting for the PVs to be deleted, you can
-set `allowUninstallWithVolumes` to true under `spec.cleanupPolicy`.
diff --git a/Documentation/ceph-common-issues.md b/Documentation/ceph-common-issues.md
deleted file mode 100644
index beedb1693..000000000
--- a/Documentation/ceph-common-issues.md
+++ /dev/null
@@ -1,1007 +0,0 @@
----
-title: Common Issues
-weight: 11120
-indent: true
----
-
-# Ceph Common Issues
-
-Many of these problem cases are hard to summarize in a short phrase that adequately describes the problem. Each problem will start with a bulleted list of symptoms. Keep in mind that all symptoms may not apply depending on the configuration of Rook.
If the majority of the symptoms are seen there is a fair chance you are experiencing that problem. - -If after trying the suggestions found on this page and the problem is not resolved, the Rook team is very happy to help you troubleshoot the issues in their Slack channel. Once you have [registered for the Rook Slack](https://slack.rook.io), proceed to the `#ceph` channel to ask for assistance. - -## Table of Contents - -* [Troubleshooting Techniques](#troubleshooting-techniques) -* [Pod Using Ceph Storage Is Not Running](#pod-using-ceph-storage-is-not-running) -* [Cluster failing to service requests](#cluster-failing-to-service-requests) -* [Monitors are the only pods running](#monitors-are-the-only-pods-running) -* [PVCs stay in pending state](#pvcs-stay-in-pending-state) -* [OSD pods are failing to start](#osd-pods-are-failing-to-start) -* [OSD pods are not created on my devices](#osd-pods-are-not-created-on-my-devices) -* [Node hangs after reboot](#node-hangs-after-reboot) -* [Rook Agent modprobe exec format error](#rook-agent-modprobe-exec-format-error) -* [Rook Agent rbd module missing error](#rook-agent-rbd-module-missing-error) -* [Using multiple shared filesystem (CephFS) is attempted on a kernel version older than 4.7](#using-multiple-shared-filesystem-cephfs-is-attempted-on-a-kernel-version-older-than-47) -* [Set debug log level for all Ceph daemons](#set-debug-log-level-for-all-ceph-daemons) -* [Activate log to file for a particular Ceph daemon](#activate-log-to-file-for-a-particular-ceph-daemon) -* [A worker node using RBD devices hangs up](#a-worker-node-using-rbd-devices-hangs-up) -* [Too few PGs per OSD warning is shown](#too-few-pgs-per-osd-warning-is-shown) -* [LVM metadata can be corrupted with OSD on LV-backed PVC](#lvm-metadata-can-be-corrupted-with-osd-on-lv-backed-pvc) -* [OSD prepare job fails due to low aio-max-nr setting](#osd-prepare-job-fails-due-to-low-aio-max-nr-setting) -* [Failed to create CRDs](#failed-to-create-crds) -* [Unexpected partitions created](#unexpected-partitions-created) - -See also the [CSI Troubleshooting Guide](ceph-csi-troubleshooting.md). - -## Troubleshooting Techniques - -There are two main categories of information you will need to investigate issues in the cluster: - -1. Kubernetes status and logs documented [here](common-issues.md) -1. Ceph cluster status (see upcoming [Ceph tools](#ceph-tools) section) - -### Ceph Tools - -After you verify the basic health of the running pods, next you will want to run Ceph tools for status of the storage components. There are two ways to run the Ceph tools, either in the Rook toolbox or inside other Rook pods that are already running. - -* Logs on a specific node to find why a PVC is failing to mount: - * Rook agent errors around the attach/detach: `kubectl logs -n rook-ceph ` -* See the [log collection topic](ceph-advanced-configuration.md#log-collection) for a script that will help you gather the logs -* Other artifacts: - * The monitors that are expected to be in quorum: `kubectl -n get configmap rook-ceph-mon-endpoints -o yaml | grep data` - -#### Tools in the Rook Toolbox - -The [rook-ceph-tools pod](./ceph-toolbox.md) provides a simple environment to run Ceph tools. Once the pod is up and running, connect to the pod to execute Ceph commands to evaluate that current state of the cluster. 
- -```console -kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') bash -``` - -#### Ceph Commands - -Here are some common commands to troubleshoot a Ceph cluster: - -* `ceph status` -* `ceph osd status` -* `ceph osd df` -* `ceph osd utilization` -* `ceph osd pool stats` -* `ceph osd tree` -* `ceph pg stat` - -The first two status commands provide the overall cluster health. The normal state for cluster operations is HEALTH_OK, but will still function when the state is in a HEALTH_WARN state. If you are in a WARN state, then the cluster is in a condition that it may enter the HEALTH_ERROR state at which point *all* disk I/O operations are halted. If a HEALTH_WARN state is observed, then one should take action to prevent the cluster from halting when it enters the HEALTH_ERROR state. - -There are many Ceph sub-commands to look at and manipulate Ceph objects, well beyond the scope this document. See the [Ceph documentation](https://docs.ceph.com/) for more details of gathering information about the health of the cluster. In addition, there are other helpful hints and some best practices located in the [Advanced Configuration section](advanced-configuration.md). Of particular note, there are scripts for collecting logs and gathering OSD information there. - -## Pod Using Ceph Storage Is Not Running - -> This topic is specific to creating PVCs based on Rook's **Flex** driver, which is no longer the default option. -> By default, Rook deploys the CSI driver for binding the PVCs to the storage. - -### Symptoms - -* The pod that is configured to use Rook storage is stuck in the `ContainerCreating` status -* `kubectl describe pod` for the pod mentions one or more of the following: - * `PersistentVolumeClaim is not bound` - * `timeout expired waiting for volumes to attach/mount` -* `kubectl -n rook-ceph get pod` shows the rook-ceph-agent pods in a `CrashLoopBackOff` status - -If you see that the PVC remains in **pending** state, see the topic [PVCs stay in pending state](#pvcs-stay-in-pending-state). - -### Possible Solutions Summary - -* `rook-ceph-agent` pod is in a `CrashLoopBackOff` status because it cannot deploy its driver on a read-only filesystem: [Flexvolume configuration pre-reqs](./ceph-prerequisites.md#ceph-flexvolume-configuration) -* Persistent Volume and/or Claim are failing to be created and bound: [Volume Creation](#volume-creation) -* `rook-ceph-agent` pod is failing to mount and format the volume: [Rook Agent Mounting](#volume-mounting) - -### Investigation Details - -If you see some of the symptoms above, it's because the requested Rook storage for your pod is not being created and mounted successfully. -In this walkthrough, we will be looking at the wordpress mysql example pod that is failing to start. - -To first confirm there is an issue, you can run commands similar to the following and you should see similar output (note that some of it has been omitted for brevity): - -```console -kubectl get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->wordpress-mysql-918363043-50pjr 0/1 ContainerCreating 0 1h ->``` - -```console -$ kubectl describe pod wordpress-mysql-918363043-50pjr -``` - ->``` ->... 
->Events: -> FirstSeen LastSeen Count From SubObjectPath Type Reason Message -> --------- -------- ----- ---- ------------- -------- ------ ------- -> 1h 1h 3 default-scheduler Warning FailedScheduling PersistentVolumeClaim is not bound: "mysql-pv-claim" (repeated 2 times) -> 1h 35s 36 kubelet, 172.17.8.101 Warning FailedMount Unable to mount volumes for pod "wordpress-mysql-918363043-50pjr_default(08d14e75-bd99-11e7-bc4c-001c428b9fc8)": timeout expired waiting for volumes to attach/mount for pod "default"/"wordpress-mysql-918363043-50pjr". list of unattached/unmounted volumes=[mysql-persistent-storage] -> 1h 35s 36 kubelet, 172.17.8.101 Warning FailedSync Error syncing pod ->``` - -To troubleshoot this, let's walk through the volume provisioning steps in order to confirm where the failure is happening. - -#### Ceph Agent Deployment - -The `rook-ceph-agent` pods are responsible for mapping and mounting the volume from the cluster onto the node that your pod will be running on. -If the `rook-ceph-agent` pod is not running then it cannot perform this function. - -Below is an example of the `rook-ceph-agent` pods failing to get to the `Running` status because they are in a `CrashLoopBackOff` status: - -```console -kubectl -n rook-ceph get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-ceph-agent-ct5pj 0/1 CrashLoopBackOff 16 59m ->rook-ceph-agent-zb6n9 0/1 CrashLoopBackOff 16 59m ->rook-operator-2203999069-pmhzn 1/1 Running 0 59m ->``` - -If you see this occurring, you can get more details about why the `rook-ceph-agent` pods are continuing to crash with the following command and its sample output: - -```console -kubectl -n rook-ceph get pod -l app=rook-ceph-agent -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.containerStatuses[0].lastState.terminated.message}{"\n"}{end}' -``` ->``` ->rook-ceph-agent-ct5pj mkdir /usr/libexec/kubernetes: read-only filesystem ->rook-ceph-agent-zb6n9 mkdir /usr/libexec/kubernetes: read-only filesystem ->``` - -From the output above, we can see that the agents were not able to bind mount to `/usr/libexec/kubernetes` on the host they are scheduled to run on. -For some environments, this default path is read-only and therefore a better path must be provided to the agents. - -First, clean up the agent deployment with: - -```console -kubectl -n rook-ceph delete daemonset rook-ceph-agent -``` - -Once the `rook-ceph-agent` pods are gone, **follow the instructions in the [Flexvolume configuration pre-reqs](./ceph-prerequisites.md#ceph-flexvolume-configuration)** to ensure a good value for `--volume-plugin-dir` has been provided to the Kubelet. -After that has been configured, and the Kubelet has been restarted, start the agent pods up again by restarting `rook-operator`: - -```console -kubectl -n rook-ceph delete pod -l app=rook-ceph-operator -``` - -#### Volume Creation - -The volume must first be created in the Rook cluster and then bound to a volume claim before it can be mounted to a pod. 
-Let's confirm that with the following commands and their output: - -```console -kubectl get pv -``` - ->``` ->NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM STORAGECLASS REASON AGE ->pvc-9f273fbc-bdbf-11e7-bc4c-001c428b9fc8 20Gi RWO Delete Bound default/mysql-pv-claim rook-ceph-block 25m ->``` - -```console -kubectl get pvc -``` - ->``` ->NAME STATUS VOLUME CAPACITY ACCESSMODES STORAGECLASS AGE ->mysql-pv-claim Bound pvc-9f273fbc-bdbf-11e7-bc4c-001c428b9fc8 20Gi RWO rook-ceph-block 25m ->``` - -Both your volume and its claim should be in the `Bound` status. -If one or neither of them is not in the `Bound` status, then look for details of the issue in the `rook-operator` logs: - -```console -kubectl -n rook-ceph logs `kubectl -n rook-ceph -l app=rook-ceph-operator get pods -o jsonpath='{.items[*].metadata.name}'` -``` - -If the volume is failing to be created, there should be details in the `rook-operator` log output, especially those tagged with `op-provisioner`. - -One common cause for the `rook-operator` failing to create the volume is when the `clusterNamespace` field of the `StorageClass` doesn't match the **namespace** of the Rook cluster, as described in [#1502](https://github.com/rook/rook/issues/1502). -In that scenario, the `rook-operator` log would show a failure similar to the following: - ->``` ->2018-03-28 18:58:32.041603 I | op-provisioner: creating volume with configuration {pool:replicapool clusterNamespace:rook-ceph fstype:} ->2018-03-28 18:58:32.041728 I | exec: Running command: rbd create replicapool/pvc-fd8aba49-32b9-11e8-978e-08002762c796 --size 20480 --cluster=rook --conf=/var/lib/rook/rook-ceph/rook.config --keyring=/var/lib/rook/rook-ceph/client.admin.keyring ->E0328 18:58:32.060893 5 controller.go:801] Failed to provision volume for claim "default/mysql-pv-claim" with StorageClass "rook-ceph-block": Failed to create rook block image replicapool/pvc-fd8aba49-32b9-11e8-978e-08002762c796: failed to create image pvc-fd8aba49-32b9-11e8-978e-08002762c796 in pool replicapool of size 21474836480: Failed to complete '': exit status 1. global_init: unable to open config file from search list /var/lib/rook/rook-ceph/rook.config ->. output: ->``` - -The solution is to ensure that the [`clusterNamespace`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/flex/storageclass.yaml#L28) field matches the **namespace** of the Rook cluster when creating the `StorageClass`. - -#### Volume Mounting - -The final step in preparing Rook storage for your pod is for the `rook-ceph-agent` pod to mount and format it. -If all the preceding sections have been successful or inconclusive, then take a look at the `rook-ceph-agent` pod logs for further clues. -You can determine which `rook-ceph-agent` is running on the same node that your pod is scheduled on by using the `-o wide` output, then you can get the logs for that `rook-ceph-agent` pod similar to the example below: - -```console -kubectl -n rook-ceph get pod -o wide -``` - ->``` ->NAME READY STATUS RESTARTS AGE IP NODE ->rook-ceph-agent-h6scx 1/1 Running 0 9m 172.17.8.102 172.17.8.102 ->rook-ceph-agent-mp7tn 1/1 Running 0 9m 172.17.8.101 172.17.8.101 ->rook-operator-2203999069-3tb68 1/1 Running 0 9m 10.32.0.7 172.17.8.101 ->``` - -```console -$ kubectl -n rook-ceph logs rook-ceph-agent-h6scx -``` - ->``` ->2017-10-30 23:07:06.984108 I | rook: starting Rook v0.5.0-241.g48ce6de.dirty with arguments '/usr/local/bin/rook agent' ->[...] 
->``` - -In the `rook-ceph-agent` pod logs, you may see a snippet similar to the following: - ->``` ->Failed to complete rbd: signal: interrupt. ->``` - -In this case, the agent waited for the `rbd` command but it did not finish in a timely manner so the agent gave up and stopped it. -This can happen for multiple reasons, but using `dmesg` will likely give you insight into the root cause. -If `dmesg` shows something similar to below, then it means you have an old kernel that can't talk to the cluster: - ->``` ->libceph: mon2 10.205.92.13:6789 feature set mismatch, my 4a042a42 < server's 2004a042a42, missing 20000000000 ->``` - -If `uname -a` shows that you have a kernel version older than `3.15`, you'll need to perform **one** of the following: - -* Disable some Ceph features by starting the [rook toolbox](./ceph-toolbox.md) and running `ceph osd crush tunables bobtail` -* Upgrade your kernel to `3.15` or later. - -#### Filesystem Mounting - -In the `rook-ceph-agent` pod logs, you may see a snippet similar to the following: - ->``` ->2017-11-07 00:04:37.808870 I | rook-flexdriver: WARNING: The node kernel version is 4.4.0-87-generic, which do not support multiple ceph filesystems. The kernel version has to be at least 4.7. If you have multiple ceph filesystems, the result could be inconsistent ->``` - -This will happen in kernels with versions older than 4.7, where the option `mds_namespace` is not supported. This option is used to specify a filesystem namespace. - -In this case, if there is only one filesystem in the Rook cluster, there should be no issues and the mount should succeed. If you have more than one filesystem, inconsistent results may arise and the filesystem mounted may not be the one you specified. - -If the issue is still not resolved from the steps above, please come chat with us on the **#general** channel of our [Rook Slack](https://slack.rook.io). -We want to help you get your storage working and learn from those lessons to prevent users in the future from seeing the same issue. - -## Cluster failing to service requests - -### Symptoms - -* Execution of the `ceph` command hangs -* PersistentVolumes are not being created -* Large amount of slow requests are blocking -* Large amount of stuck requests are blocking -* One or more MONs are restarting periodically - -### Investigation - -Create a [rook-ceph-tools pod](ceph-toolbox.md) to investigate the current state of Ceph. Here is an example of what one might see. In this case the `ceph status` command would just hang so a CTRL-C needed to be sent. - -```console -kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status - -ceph status -^CCluster connection interrupted or timed out -``` - -Another indication is when one or more of the MON pods restart frequently. Note the 'mon107' that has only been up for 16 minutes in the following output. 
- -```console -kubectl -n rook-ceph get all -o wide --show-all -``` - ->``` ->NAME READY STATUS RESTARTS AGE IP NODE ->po/rook-ceph-mgr0-2487684371-gzlbq 1/1 Running 0 17h 192.168.224.46 k8-host-0402 ->po/rook-ceph-mon107-p74rj 1/1 Running 0 16m 192.168.224.28 k8-host-0402 ->rook-ceph-mon1-56fgm 1/1 Running 0 2d 192.168.91.135 k8-host-0404 ->rook-ceph-mon2-rlxcd 1/1 Running 0 2d 192.168.123.33 k8-host-0403 ->rook-ceph-osd-bg2vj 1/1 Running 0 2d 192.168.91.177 k8-host-0404 ->rook-ceph-osd-mwxdm 1/1 Running 0 2d 192.168.123.31 k8-host-0403 ->``` - -### Solution - -What is happening here is that the MON pods are restarting and one or more of the Ceph daemons are not getting configured with the proper cluster information. This is commonly the result of not specifying a value for `dataDirHostPath` in your Cluster CRD. - -The `dataDirHostPath` setting specifies a path on the local host for the Ceph daemons to store configuration and data. Setting this to a path like `/var/lib/rook`, reapplying your Cluster CRD and restarting all the Ceph daemons (MON, MGR, OSD, RGW) should solve this problem. After the Ceph daemons have been restarted, it is advisable to restart the [rook-tool pod](./toolbox.md). - -## Monitors are the only pods running - -### Symptoms - -* Rook operator is running -* Either a single mon starts or the mons start very slowly (at least several minutes apart) -* The crash-collector pods are crashing -* No mgr, osd, or other daemons are created except the CSI driver - -### Investigation - -When the operator is starting a cluster, the operator will start one mon at a time and check that they are healthy before continuing to bring up all three mons. -If the first mon is not detected healthy, the operator will continue to check until it is healthy. If the first mon fails to start, a second and then a third -mon may attempt to start. However, they will never form quorum and the orchestration will be blocked from proceeding. - -The crash-collector pods will be blocked from starting until the mons have formed quorum the first time. - -There are several common causes for the mons failing to form quorum: - -* The operator pod does not have network connectivity to the mon pod(s). The network may be configured incorrectly. -* One or more mon pods are in running state, but the operator log shows they are not able to form quorum -* A mon is using configuration from a previous installation. See the [cleanup guide](ceph-teardown.md#delete-the-data-on-hosts) - for cleaning the previous cluster. -* A firewall may be blocking the ports required for the Ceph mons to form quorum. Ensure ports 6789 and 3300 are enabled. - See the [Ceph networking guide](https://docs.ceph.com/en/latest/rados/configuration/network-config-ref/) for more details. - -#### Operator fails to connect to the mon - -First look at the logs of the operator to confirm if it is able to connect to the mons. - -```console -kubectl -n rook-ceph logs -l app=rook-ceph-operator -``` - -Likely you will see an error similar to the following that the operator is timing out when connecting to the mon. The last command is `ceph mon_status`, -followed by a timeout message five minutes later. 
- ->``` ->2018-01-21 21:47:32.375833 I | exec: Running command: ceph mon_status --cluster=rook --conf=/var/lib/rook/rook-ceph/rook.config --keyring=/var/lib/rook/rook-ceph/client.admin.keyring --format json --out-file /tmp/442263890 ->2018-01-21 21:52:35.370533 I | exec: 2018-01-21 21:52:35.071462 7f96a3b82700 0 monclient(hunting): authenticate timed out after 300 ->2018-01-21 21:52:35.071462 7f96a3b82700 0 monclient(hunting): authenticate timed out after 300 ->2018-01-21 21:52:35.071524 7f96a3b82700 0 librados: client.admin authentication error (110) Connection timed out ->2018-01-21 21:52:35.071524 7f96a3b82700 0 librados: client.admin authentication error (110) Connection timed out ->[errno 110] error connecting to the cluster ->``` - -The error would appear to be an authentication error, but it is misleading. The real issue is a timeout. - -#### Solution - -If you see the timeout in the operator log, verify if the mon pod is running (see the next section). -If the mon pod is running, check the network connectivity between the operator pod and the mon pod. -A common issue is that the CNI is not configured correctly. - -To verify the network connectivity: -- Get the endpoint for a mon -- Curl the mon from the operator pod - -For example, this command will curl the first mon from the operator: - -``` -kubectl -n rook-ceph exec deploy/rook-ceph-operator -- curl $(kubectl -n rook-ceph get svc -l app=rook-ceph-mon -o jsonpath='{.items[0].spec.clusterIP}'):3300 2>/dev/null -``` - ->``` ->ceph v2 ->``` - -If "ceph v2" is printed to the console, the connection was successful. If the command does not respond or -otherwise fails, the network connection cannot be established. - -#### Failing mon pod - -Second we need to verify if the mon pod started successfully. - -```console -kubectl -n rook-ceph get pod -l app=rook-ceph-mon -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-ceph-mon-a-69fb9c78cd-58szd 1/1 CrashLoopBackOff 2 47s ->``` - -If the mon pod is failing as in this example, you will need to look at the mon pod status or logs to determine the cause. If the pod is in a crash loop backoff state, -you should see the reason by describing the pod. - -```console -# The pod shows a termination status that the keyring does not match the existing keyring -kubectl -n rook-ceph describe pod -l mon=rook-ceph-mon0 -``` - ->``` ->... -> Last State: Terminated -> Reason: Error -> Message: The keyring does not match the existing keyring in /var/lib/rook/rook-ceph-mon0/data/keyring. -> You may need to delete the contents of dataDirHostPath on the host from a previous deployment. ->... ->``` - -See the solution in the next section regarding cleaning up the `dataDirHostPath` on the nodes. - -#### Solution - -This is a common problem reinitializing the Rook cluster when the local directory used for persistence has **not** been purged. -This directory is the `dataDirHostPath` setting in the cluster CRD and is typically set to `/var/lib/rook`. -To fix the issue you will need to delete all components of Rook and then delete the contents of `/var/lib/rook` (or the directory specified by `dataDirHostPath`) on each of the hosts in the cluster. -Then when the cluster CRD is applied to start a new cluster, the rook-operator should start all the pods as expected. - -> **IMPORTANT: Deleting the `dataDirHostPath` folder is destructive to the storage. Only delete the folder if you are trying to permanently purge the Rook cluster.** - -See the [Cleanup Guide](ceph-teardown.md) for more details. 
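-
-As a destructive sketch only (assuming the default `dataDirHostPath` of `/var/lib/rook`), the cleanup on each storage host would be something like the following, run only after all Rook components have been deleted:
-
-```console
-rm -rf /var/lib/rook
-```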
-
-## PVCs stay in pending state
-
-### Symptoms
-
-* When you create a PVC based on a rook storage class, it stays pending indefinitely
-
-For the Wordpress example, you might see two PVCs in pending state.
-
-```console
-kubectl get pvc
-```
-
->```
->NAME             STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS      AGE
->mysql-pv-claim   Pending                                      rook-ceph-block   8s
->wp-pv-claim      Pending                                      rook-ceph-block   16s
->```
-
-### Investigation
-
-There are two common causes for the PVCs staying in pending state:
-
-1. There are no OSDs in the cluster
-2. The CSI provisioner pod is not running or is not responding to the request to provision the storage
-
-If you are still using the Rook flex driver for the volumes (the CSI driver is the default since Rook v1.1),
-another cause could be that the operator is not running or is otherwise not responding to the request to provision the storage.
-
-#### Confirm if there are OSDs
-
-To confirm if you have OSDs in your cluster, connect to the [Rook Toolbox](ceph-toolbox.md) and run the `ceph status` command.
-You should see that you have at least one OSD `up` and `in`. The minimum number of OSDs required depends on the
-`replicated.size` setting in the pool created for the storage class. In a "test" cluster, only one OSD is required
-(see `storageclass-test.yaml`). In the production storage class example (`storageclass.yaml`), three OSDs would be required.
-
-```console
-ceph status
-```
-
->```
->  cluster:
->    id:     a0452c76-30d9-4c1a-a948-5d8405f19a7c
->    health: HEALTH_OK
->
->  services:
->    mon: 3 daemons, quorum a,b,c (age 11m)
->    mgr: a(active, since 10m)
->    osd: 1 osds: 1 up (since 46s), 1 in (since 109m)
->```
-
-#### OSD Prepare Logs
-
-If you don't see the expected number of OSDs, let's investigate why they weren't created.
-On each node where Rook looks for OSDs to configure, you will see an "osd prepare" pod.
-
-```console
-kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare
-```
-
->```
->NAME                                   ...  READY   STATUS      RESTARTS   AGE
->rook-ceph-osd-prepare-minikube-9twvk        0/2     Completed   0          30m
->```
-
-See the section on [why OSDs are not getting created](#osd-pods-are-not-created-on-my-devices) to investigate the logs.
-
-#### CSI Driver
-
-The CSI driver may not be responding to the requests. Look in the logs of the CSI provisioner pod to see if there are any errors
-during the provisioning.
-
-There are two provisioner pods:
-
-```console
-kubectl -n rook-ceph get pod -l app=csi-rbdplugin-provisioner
-```
-
-Get the logs of each of the pods. One of them should be the "leader" and be responding to requests.
-
-```console
-kubectl -n rook-ceph logs csi-cephfsplugin-provisioner-d77bb49c6-q9hwq csi-provisioner
-```
-
-See also the [CSI Troubleshooting Guide](ceph-csi-troubleshooting.md).
-
-#### Operator unresponsiveness
-
-Lastly, if you have OSDs `up` and `in`, the next step is to confirm the operator is responding to the requests.
-Look in the Operator pod logs around the time when the PVC was created to confirm if the request is being raised.
-If the operator does not show requests to provision the block image, the operator may be stuck on some other operation.
-In this case, restart the operator pod to get things going again.
-
-### Solution
-
-If the "osd prepare" logs didn't give you enough clues about why the OSDs were not being created,
-please review your [cluster.yaml](ceph-cluster-crd.md#storage-selection-settings) configuration.
-The common misconfigurations include:
-
-* If `useAllDevices: true`, Rook expects to find local devices attached to the nodes.
If no devices are found, no OSDs will be created. -* If `useAllDevices: false`, OSDs will only be created if `deviceFilter` is specified. -* Only local devices attached to the nodes will be configurable by Rook. In other words, the devices must show up under `/dev`. - * The devices must not have any partitions or filesystems on them. Rook will only configure raw devices. Partitions are not yet supported. - -## OSD pods are failing to start - -### Symptoms - -* OSD pods are failing to start -* You have started a cluster after tearing down another cluster - -### Investigation - -When an OSD starts, the device or directory will be configured for consumption. If there is an error with the configuration, the pod will crash and you will see the CrashLoopBackoff -status for the pod. Look in the osd pod logs for an indication of the failure. - -```console -$ kubectl -n rook-ceph logs rook-ceph-osd-fl8fs -... -``` - -One common case for failure is that you have re-deployed a test cluster and some state may remain from a previous deployment. -If your cluster is larger than a few nodes, you may get lucky enough that the monitors were able to start and form quorum. However, now the OSDs pods may fail to start due to the -old state. Looking at the OSD pod logs you will see an error about the file already existing. - -```console -$ kubectl -n rook-ceph logs rook-ceph-osd-fl8fs -``` - ->``` ->... ->2017-10-31 20:13:11.187106 I | mkfs-osd0: 2017-10-31 20:13:11.186992 7f0059d62e00 -1 bluestore(/var/lib/rook/osd0) _read_fsid unparsable uuid ->2017-10-31 20:13:11.187208 I | mkfs-osd0: 2017-10-31 20:13:11.187026 7f0059d62e00 -1 bluestore(/var/lib/rook/osd0) _setup_block_symlink_or_file failed to create block symlink to /dev/disk/by-partuuid/651153ba-2dfc-4231-ba06-94759e5ba273: (17) File exists ->2017-10-31 20:13:11.187233 I | mkfs-osd0: 2017-10-31 20:13:11.187038 7f0059d62e00 -1 bluestore(/var/lib/rook/osd0) mkfs failed, (17) File exists ->2017-10-31 20:13:11.187254 I | mkfs-osd0: 2017-10-31 20:13:11.187042 7f0059d62e00 -1 OSD::mkfs: ObjectStore::mkfs failed with error (17) File exists ->2017-10-31 20:13:11.187275 I | mkfs-osd0: 2017-10-31 20:13:11.187121 7f0059d62e00 -1 ** ERROR: error creating empty object store in /var/lib/rook/osd0: (17) File exists ->``` - -### Solution - -If the error is from the file that already exists, this is a common problem reinitializing the Rook cluster when the local directory used for persistence has **not** been purged. -This directory is the `dataDirHostPath` setting in the cluster CRD and is typically set to `/var/lib/rook`. -To fix the issue you will need to delete all components of Rook and then delete the contents of `/var/lib/rook` (or the directory specified by `dataDirHostPath`) on each of the hosts in the cluster. -Then when the cluster CRD is applied to start a new cluster, the rook-operator should start all the pods as expected. - -## OSD pods are not created on my devices - -### Symptoms - -* No OSD pods are started in the cluster -* Devices are not configured with OSDs even though specified in the Cluster CRD -* One OSD pod is started on each node instead of multiple pods for each device - -### Investigation - -First, ensure that you have specified the devices correctly in the CRD. 
-The [Cluster CRD](ceph-cluster-crd.md#storage-selection-settings) has several ways to specify the devices that are to be consumed by the Rook storage: - -* `useAllDevices: true`: Rook will consume all devices it determines to be available -* `deviceFilter`: Consume all devices that match this regular expression -* `devices`: Explicit list of device names on each node to consume - -Second, if Rook determines that a device is not available (has existing partitions or a formatted filesystem), Rook will skip consuming the devices. -If Rook is not starting OSDs on the devices you expect, Rook may have skipped it for this reason. To see if a device was skipped, view the OSD preparation log -on the node where the device was skipped. Note that it is completely normal and expected for OSD prepare pod to be in the `completed` state. -After the job is complete, Rook leaves the pod around in case the logs need to be investigated. - -```console -# Get the prepare pods in the cluster -kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-ceph-osd-prepare-node1-fvmrp 0/1 Completed 0 18m ->rook-ceph-osd-prepare-node2-w9xv9 0/1 Completed 0 22m ->rook-ceph-osd-prepare-node3-7rgnv 0/1 Completed 0 22m ->``` - -```console -# view the logs for the node of interest in the "provision" container -kubectl -n rook-ceph logs rook-ceph-osd-prepare-node1-fvmrp provision -[...] -``` - -Here are some key lines to look for in the log: - ->``` -># A device will be skipped if Rook sees it has partitions or a filesystem ->2019-05-30 19:02:57.353171 W | cephosd: skipping device sda that is in use ->2019-05-30 19:02:57.452168 W | skipping device "sdb5": ["Used by ceph-disk"] -> -># Other messages about a disk being unusable by ceph include: ->Insufficient space (<5GB) on vgs ->Insufficient space (<5GB) ->LVM detected ->Has BlueStore device label ->locked ->read-only -> -># A device is going to be configured ->2019-05-30 19:02:57.535598 I | cephosd: device sdc to be configured by ceph-volume -> -># For each device configured you will see a report printed to the log ->2019-05-30 19:02:59.844642 I | Type Path LV Size % of device ->2019-05-30 19:02:59.844651 I | ---------------------------------------------------------------------------------------------------- ->2019-05-30 19:02:59.844677 I | [data] /dev/sdc 7.00 GB 100% ->``` - -### Solution - -Either update the CR with the correct settings, or clean the partitions or filesystem from your devices. -To clean devices from a previous install see the [cleanup guide](ceph-teardown.md#zapping-devices). - -After the settings are updated or the devices are cleaned, trigger the operator to analyze the devices again by restarting the operator. -Each time the operator starts, it will ensure all the desired devices are configured. The operator does automatically -deploy OSDs in most scenarios, but an operator restart will cover any scenarios that the operator doesn't detect automatically. - -```console -# Restart the operator to ensure devices are configured. A new pod will automatically be started when the current operator pod is deleted. -kubectl -n rook-ceph delete pod -l app=rook-ceph-operator -[...] -``` - -## Node hangs after reboot - -This issue is fixed in Rook v1.3 or later. 
- -### Symptoms - -* After issuing a `reboot` command, node never returned online -* Only a power cycle helps - -### Investigation - -On a node running a pod with a Ceph persistent volume - -```console -mount | grep rbd -``` ->``` -># _netdev mount option is absent, also occurs for cephfs -># OS is not aware PV is mounted over network ->/dev/rbdx on ... (rw,relatime, ..., noquota) ->``` - -When the reboot command is issued, network interfaces are terminated before disks -are unmounted. This results in the node hanging as repeated attempts to unmount -Ceph persistent volumes fail with the following error: - ->``` ->libceph: connect [monitor-ip]:6789 error -101 ->``` - -### Solution - -The node needs to be [drained](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) before reboot. After the successful drain, the node can be rebooted as usual. - -Because `kubectl drain` command automatically marks the node as unschedulable (`kubectl cordon` effect), the node needs to be uncordoned once it's back online. - -Drain the node: - -```console -$ kubectl drain --ignore-daemonsets --delete-local-data -``` - -Uncordon the node: - -```console -$ kubectl uncordon -``` - -## Rook Agent modprobe exec format error - -### Symptoms - -* PersistentVolumes from Ceph fail/timeout to mount -* Rook Agent logs contain `modinfo: ERROR: could not get modinfo from 'rbd': Exec format error` lines - -### Solution - -If it is feasible to upgrade your kernel, you should upgrade to `4.x`, even better is >= `4.7` due to a feature for CephFS added to the kernel. - -If you are unable to upgrade the kernel, you need to go to each host that will consume storage and run: - -```console -modprobe rbd -``` - -This command inserts the `rbd` module into the kernel. - -To persist this fix, you need to add the `rbd` kernel module to either `/etc/modprobe.d/` or `/etc/modules-load.d/`. -For both paths create a file called `rbd.conf` with the following content: - -```console -rbd -``` - -Now when a host is restarted, the module should be loaded automatically. - -## Rook Agent rbd module missing error - -### Symptoms - -* Rook Agent in `Error` or `CrashLoopBackOff` status when deploying the Rook operator with `kubectl create -f operator.yaml`: - -```console -kubectl -n rook-ceph get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-ceph-agent-gfrm5 0/1 Error 0 14s ->rook-ceph-operator-5f4866946-vmtff 1/1 Running 0 23s ->rook-discover-qhx6c 1/1 Running 0 14s ->``` - -* Rook Agent logs contain below messages: - ->``` ->2018-08-10 09:09:09.461798 I | exec: Running command: cat /lib/modules/4.15.2/modules.builtin ->2018-08-10 09:09:09.473858 I | exec: Running command: modinfo -F parm rbd ->2018-08-10 09:09:09.477215 N | ceph-volumeattacher: failed rbd single_major check, assuming it's unsupported: failed to check for rbd module single_major param: Failed to complete 'check kmod param': exit status 1. modinfo: ERROR: Module rbd not found. ->2018-08-10 09:09:09.477239 I | exec: Running command: modprobe rbd ->2018-08-10 09:09:09.480353 I | modprobe rbd: modprobe: FATAL: Module rbd not found. ->2018-08-10 09:09:09.480452 N | ceph-volumeattacher: failed to load kernel module rbd: failed to load kernel module rbd: Failed to complete 'modprobe rbd': exit status 1. ->failed to run rook ceph agent. failed to create volume manager: failed to load >kernel module rbd: Failed to complete 'modprobe rbd': exit status 1. 
->```
-
-### Solution
-
-From the log messages of the Agent, we can see that the `rbd` kernel module is not available on the current system, neither as a builtin nor as a loadable external kernel module.
-
-In this case, you have to [re-configure and build](https://www.linuxjournal.com/article/6568) a new kernel to address this issue. There are two options:
-
-* Re-configure your kernel to make sure `CONFIG_BLK_DEV_RBD=y` is set in the `.config` file, then build the kernel.
-* Re-configure your kernel to make sure `CONFIG_BLK_DEV_RBD=m` is set in the `.config` file, then build the kernel.
-
-After rebooting the system into the new kernel, the issue should be fixed: the Agent will be in the normal `running` status if everything was done correctly.
-
-## Using multiple shared filesystems (CephFS) is attempted on a kernel version older than 4.7
-
-### Symptoms
-
-* More than one shared filesystem (CephFS) has been created in the cluster
-* A pod attempts to mount any other shared filesystem besides the **first** one that was created
-* The pod incorrectly gets the first filesystem mounted instead of the intended filesystem
-
-### Solution
-
-The only solution to this problem is to upgrade your kernel to `4.7` or higher.
-This is due to a mount flag added in kernel version `4.7` which allows choosing the filesystem by name.
-
-For additional info on the kernel version requirement for multiple shared filesystems (CephFS), see [Filesystem - Kernel version requirement](ceph-filesystem.md#kernel-version-requirement).
-
-## Set debug log level for all Ceph daemons
-
-You can set a given log level and apply it to all the Ceph daemons at the same time.
-For this, make sure the toolbox pod is running, then determine the level you want (between 0 and 20).
-You can find the list of all subsystems and their default values in the [Ceph logging and debug official guide](https://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/#ceph-subsystems). Be careful when increasing the level as it will produce very verbose logs.
-
-Assuming you want a log level of 1, you will run:
-
-```console
-kubectl -n rook-ceph exec deploy/rook-ceph-tools -- set-ceph-debug-level 1
-```
-
-Output:
-
->```quote
-> ceph config set global debug_context 1
-> ceph config set global debug_lockdep 1
->...
->...
->```
-
-Once you are done debugging, you can revert all the debug flags to their default values by running the following:
-
-```console
-kubectl -n rook-ceph exec deploy/rook-ceph-tools -- set-ceph-debug-level default
-```
-
-## Activate log to file for a particular Ceph daemon
-
-There are cases where looking at the Kubernetes logs is not enough, for diverse reasons. Just to name a few:
-
-* not everyone is familiar with Kubernetes logging and expects to find logs in traditional directories
-* logs get eaten (buffer limit from the log engine) and thus cannot be retrieved from Kubernetes
-
-So for each daemon, `dataDirHostPath` is used to store logs, if logging is activated.
-Rook will bindmount `dataDirHostPath` for every pod.
-Let's say you want to enable logging for `mon.a`, but only for this daemon.
-Using the toolbox or from inside the operator, run:
-
-```console
-ceph config set mon.a log_to_file true
-```
-
-This will activate logging on the filesystem; you will be able to find the logs in `dataDirHostPath/$NAMESPACE/log`, so typically this would mean `/var/lib/rook/rook-ceph/log`.
-You don't need to restart the pod; the effect will be immediate.
-
-To disable logging to file, simply set `log_to_file` to `false`.
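-For example, to turn file logging back off for the same daemon once you have collected what you need (a minimal sketch, reusing the `mon.a` example from above):
-
-```console
-ceph config set mon.a log_to_file false
-```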
- -## A worker node using RBD devices hangs up - -### Symptoms - -* There is no progress on I/O from/to one of RBD devices (`/dev/rbd*` or `/dev/nbd*`). -* After that, the whole worker node hangs up. - -### Investigation - -This happens when the following conditions are satisfied. - -- The problematic RBD device and the corresponding OSDs are co-located. -- There is an XFS filesystem on top of this device. - -In addition, when this problem happens, you can see the following messages in `dmesg`. - -```console -dmesg -``` ->``` ->... ->[51717.039319] INFO: task kworker/2:1:5938 blocked for more than 120 seconds. ->[51717.039361] Not tainted 4.15.0-72-generic #81-Ubuntu ->[51717.039388] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. ->... ->``` - -It's so-called `hung_task` problem and means that there is a deadlock in the kernel. For more detail, please refer to [the corresponding issue comment](https://github.com/rook/rook/issues/3132#issuecomment-580508760). - -### Solution - -This problem will be solve by the following two fixes. - -* Linux kernel: A minor feature that is introduced by [this commit](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8d19f1c8e1937baf74e1962aae9f90fa3aeab463). It will be included in Linux v5.6. -* Ceph: A fix that uses the above-mentioned kernel's feature. The Ceph community will probably discuss this fix after releasing Linux v5.6. - -You can bypass this problem by using ext4 or any other filesystems rather than XFS. Filesystem type can be specified with `csi.storage.k8s.io/fstype` in StorageClass resource. - -## Too few PGs per OSD warning is shown - -### Symptoms - -- `ceph status` shows "too few PGs per OSD" warning as follows. - -```console -ceph status -``` - ->``` -> cluster: -> id: fd06d7c3-5c5c-45ca-bdea-1cf26b783065 -> health: HEALTH_WARN -> too few PGs per OSD (16 < min 30) ->``` - -### Solution - -The meaning of this warning is written in [the document](https://docs.ceph.com/docs/master/rados/operations/health-checks#too-few-pgs). -However, in many cases it is benign. For more information, please see [the blog entry](http://ceph.com/community/new-luminous-pg-overdose-protection/). -Please refer to [Configuring Pools](ceph-advanced-configuration.md#configuring-pools) if you want to know the proper `pg_num` of pools and change these values. - -## LVM metadata can be corrupted with OSD on LV-backed PVC - -### Symptoms - -There is a critical flaw in OSD on LV-backed PVC. LVM metadata can be corrupted if both the host and OSD container modify it simultaneously. For example, the administrator might modify it on the host, while the OSD initialization process in a container could modify it too. In addition, if `lvmetad` is running, the possibility of occurrence gets higher. In this case, the change of LVM metadata in OSD container is not reflected to LVM metadata cache in host for a while. - -If you still decide to configure an OSD on LVM, please keep the following in mind to reduce the probability of this issue. - -### Solution - -- Disable `lvmetad.` -- Avoid configuration of LVs from the host. In addition, don't touch the VGs and physical volumes that back these LVs. -- Avoid incrementing the `count` field of `storageClassDeviceSets` and create a new LV that backs an OSD simultaneously. - -You can know whether the above-mentioned tag exists with the command: `sudo lvs -o lv_name,lv_tags`. If the `lv_tag` field is empty in an LV corresponding to the OSD lv_tags, this OSD encountered the problem. 
In this case, please [retire this OSD](ceph-osd-mgmt.md#remove-an-osd) or replace it with a new OSD before restarting.
-
-This problem doesn't happen in newly created LV-backed PVCs because the OSD container doesn't modify LVM metadata anymore. The existing lvm mode OSDs continue to work even after you upgrade Rook. However, using raw mode OSDs is recommended because of the above-mentioned problem. You can replace the existing OSDs with raw mode OSDs by retiring them and adding new OSDs one by one. See the documents [Remove an OSD](ceph-osd-mgmt.md#remove-an-osd) and [Add an OSD on a PVC](ceph-osd-mgmt.md#add-an-osd-on-a-pvc).
-
-## OSD prepare job fails due to low aio-max-nr setting
-
-If the kernel is configured with a low [aio-max-nr setting](https://www.kernel.org/doc/Documentation/sysctl/fs.txt), the OSD prepare job might fail with the following error:
-
-```text
-exec: stderr: 2020-09-17T00:30:12.145+0000 7f0c17632f40 -1 bdev(0x56212de88700 /var/lib/ceph/osd/ceph-0//block) _aio_start io_setup(2) failed with EAGAIN; try increasing /proc/sys/fs/aio-max-nr
-```
-
-To overcome this, you need to increase the value of `fs.aio-max-nr` in your sysctl configuration (typically `/etc/sysctl.conf`).
-You can do this with your favorite configuration management system.
-
-Alternatively, you can use a [DaemonSet](https://github.com/rook/rook/issues/6279#issuecomment-694390514) to apply the configuration for you on all your nodes.
-
-## Failed to create CRDs
-If you are using Kubernetes v1.15 or older, you will see an error like this:
->```
->unable to recognize "STDIN": no matches for kind "CustomResourceDefinition" in version "apiextensions.k8s.io/v1"
->```
-You need to create the CRDs found in `cluster/examples/kubernetes/ceph/pre-k8s-1.16`. Note that these pre-1.16 `apiextensions.k8s.io/v1beta1` CRDs are deprecated in k8s v1.16 and will no longer be supported from k8s v1.22.
-
-
-## Unexpected partitions created
-
-### Symptoms
-**Users running Rook versions v1.6.0-v1.6.7 may observe unwanted OSDs on partitions that appear
-unexpectedly and seemingly randomly, which can corrupt existing OSDs.**
-
-Unexpected partitions are created on host disks that are used by Ceph OSDs. This happens more often
-on SSDs than HDDs and usually only on disks that are 875GB or larger. Many tools like `lsblk`,
-`blkid`, `udevadm`, and `parted` will not show a partition table type for the partition. Newer
-versions of `blkid` are generally able to recognize the type as "atari".
-
-The underlying issue causing this is Atari partition (sometimes identified as AHDI) support in the
-Linux kernel. Atari partitions have very relaxed specifications compared to other partition types,
-and it is relatively easy for random data written to a disk to appear as an Atari partition to the
-Linux kernel. Ceph's Bluestore OSDs have an anecdotally high probability of writing data onto disks
-that can appear to the kernel as an Atari partition.
-
-Below is an example of `lsblk` output from a node where phantom Atari partitions are present. Note
-that `sdX1` is never present for the phantom partitions, and `sdX2` is 48G on all disks. `sdX3`
-is a variable size and may not always be present. It is possible for `sdX4` to appear, though it is
-an anecdotally rare event.
-``` -# lsblk -NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT -sdb 8:16 0 3T 0 disk -├─sdb2 8:18 0 48G 0 part -└─sdb3 8:19 0 6.1M 0 part -sdc 8:32 0 3T 0 disk -├─sdc2 8:34 0 48G 0 part -└─sdc3 8:35 0 6.2M 0 part -sdd 8:48 0 3T 0 disk -├─sdd2 8:50 0 48G 0 part -└─sdd3 8:51 0 6.3M 0 part -``` - -You can see https://github.com/rook/rook/issues/7940 for more detailed information and discussion. - -### Solution -#### Recover from corruption (v1.6.0-v1.6.7) -If you are using Rook v1.6, you must first update to v1.6.8 or higher to avoid further incidents of -OSD corruption caused by these Atari partitions. - -An old workaround suggested using `deviceFilter: ^sd[a-z]+$`, but this still results in unexpected -partitions. Rook will merely stop creating new OSDs on the partitions. It does not fix a related -issue that `ceph-volume` that is unaware of the Atari partition problem. Users who used this -workaround are still at risk for OSD failures in the future. - -To resolve the issue, immediately update to v1.6.8 or higher. After the update, no corruption should -occur on OSDs created in the future. Next, to get back to a healthy Ceph cluster state, focus on one -corruped disk at a time and [remove all OSDs on each corrupted disk](ceph-osd-mgmt.md#remove-an-osd) -one disk at a time. - -As an example, you may have `/dev/sdb` with two unexpected partitions (`/dev/sdb2` and `/dev/sdb3`) -as well as a second corrupted disk `/dev/sde` with one unexpected partition (`/dev/sde2`). -1. First, remove the OSDs associated with `/dev/sdb`, `/dev/sdb2`, and `/dev/sdb3`. There might be - only one, or up to 3 OSDs depending on how your system was affected. Again see the - [OSD management doc](ceph-osd-mgmt.md#remove-an-osd). -2. Use `dd` to wipe the first sectors of the partitions followed by the disk itself. E.g., - - `dd if=/dev/zero of=/dev/sdb2 bs=1M` - - `dd if=/dev/zero of=/dev/sdb3 bs=1M` - - `dd if=/dev/zero of=/dev/sdb bs=1M` -3. Then wipe clean `/dev/sdb` to prepare it for a new OSD. - See [the teardown document](ceph-teardown.md#zapping-devices) for details. -4. After this, scale up the Rook operator to deploy a new OSD to `/dev/sdb`. This will allow Ceph to - use `/dev/sdb` for data recovery and replication while the next OSDs are removed. -5. Now Repeat steps 1-4 for `/dev/sde` and `/dev/sde2`, and continue for any other corruped disks. - -If your Rook-Ceph cluster does not have any critical data stored in it, it may be simpler to -uninstall Rook completely and redeploy with v1.6.8 or higher. diff --git a/Documentation/ceph-configuration.md b/Documentation/ceph-configuration.md deleted file mode 100644 index 751337cff..000000000 --- a/Documentation/ceph-configuration.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Configuration -weight: 3700 -indent: true ---- - -# Configuration - -For most any Ceph cluster, the user will want to--and may need to--change some Ceph -configurations. These changes often may be warranted in order to alter performance to meet SLAs or -to update default data resiliency settings. - -> **WARNING**: Modify Ceph settings carefully, and review the -> [Ceph configuration documentation](https://docs.ceph.com/docs/master/rados/configuration/) before -> making any changes. -> Changing the settings could result in unhealthy daemons or even data loss if -> used incorrectly. - -## Required configurations - -Rook and Ceph both strive to make configuration as easy as possible, but there are some -configuration options which users are well advised to consider for any production cluster. 
- -### Default PG and PGP counts - -The number of PGs and PGPs can be configured on a per-pool basis, but it is highly advised to set -default values that are appropriate for your Ceph cluster. Appropriate values depend on the number -of OSDs the user expects to have backing each pool. The Ceph [OSD and Pool config -docs](https://docs.ceph.com/docs/master/rados/operations/placement-groups/#a-preselection-of-pg-num) -provide detailed information about how to tune these parameters: `osd_pool_default_pg_num` and `osd_pool_default_pgp_num`. - -Nautilus [introduced the PG auto-scaler mgr module](https://ceph.com/rados/new-in-nautilus-pg-merging-and-autotuning/) -capable of automatically managing PG and PGP values for pools. Please see -[Ceph New in Nautilus: PG merging and autotuning](https://ceph.io/rados/new-in-nautilus-pg-merging-and-autotuning/) -for more information about this module. - -In Nautilus, This module is not enabled by default, but can be enabled by the following setting -in the [CephCluster CR](ceph-cluster-crd.md#mgr-settings): - -```yaml -spec: - mgr: - modules: - - name: pg_autoscaler - enabled: true -``` - -In Octopus (v15.2.x) and newer, this module is enabled by default without the above-mentioned setting. - -With that setting, the autoscaler will be enabled for all new pools. If you do not desire to have -the autoscaler enabled for all new pools, you will need to use the Rook toolbox to enable the module -and [enable the autoscaling](https://docs.ceph.com/docs/master/rados/operations/placement-groups/) -on individual pools. - -The autoscaler is not enabled for the existing pools after enabling the module. So if you want to -enable the autoscaling for these existing pools, they must be configured from the toolbox. - -## Specifying configuration options - -### Toolbox + Ceph CLI - -The most recommended way of configuring Ceph is to set Ceph's configuration directly. The first -method for doing so is to use Ceph's CLI from the Rook-Ceph toolbox pod. Using the toolbox pod is -detailed [here](ceph-toolbox.md). From the toolbox, the user can change Ceph configurations, enable -manager modules, create users and pools, and much more. - -### Ceph Dashboard - -The Ceph Dashboard, examined in more detail [here](ceph-dashboard.md), is another way of setting -some of Ceph's configuration directly. Configuration by the Ceph dashboard is recommended with the -same priority as configuration via the Ceph CLI (above). - -### Advanced configuration via ceph.conf override ConfigMap - -Setting configs via Ceph's CLI requires that at least one mon be available for the configs to be -set, and setting configs via dashboard requires at least one mgr to be available. Ceph may also have -a small number of very advanced settings that aren't able to be modified easily via CLI or -dashboard. The **least** recommended method for configuring Ceph is intended as a last-resort -fallback in situations like these. This is covered in detail -[here](ceph-advanced-configuration.md#custom-cephconf-settings). 
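-A minimal sketch of such an override, assuming the `rook-config-override` ConfigMap name and `config` data key used in Rook's example manifests (adjust the namespace and settings to your own cluster):
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  # Assumed name and namespace; match your Rook deployment
-  name: rook-config-override
-  namespace: rook-ceph
-data:
-  config: |
-    [global]
-    # Example setting only; review the Ceph docs before changing values
-    osd_pool_default_size = 3
-```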
diff --git a/Documentation/ceph-csi-drivers.md b/Documentation/ceph-csi-drivers.md deleted file mode 100644 index ccc3ba244..000000000 --- a/Documentation/ceph-csi-drivers.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Ceph CSI -weight: 3200 -indent: true ---- - -# Ceph CSI Drivers - -There are two CSI drivers integrated with Rook that will enable different scenarios: - -* RBD: This driver is optimized for RWO pod access where only one pod may access the storage -* CephFS: This driver allows for RWX with one or more pods accessing the same storage - -The drivers are enabled automatically with the Rook operator. They will be started -in the same namespace as the operator when the first CephCluster CR is created. - -For documentation on consuming the storage: - -* RBD: See the [Block Storage](ceph-block.md) topic -* CephFS: See the [Shared Filesystem](ceph-filesystem.md) topic - -## Static Provisioning - -Both drivers also support the creation of static PV and static PVC from existing RBD image/CephFS volume. Refer to [static PVC](https://github.com/ceph/ceph-csi/blob/devel/docs/static-pvc.md) for more information. - -## Configure CSI Drivers in non-default namespace - -If you've deployed the Rook operator in a namespace other than "rook-ceph", -change the prefix in the provisioner to match the namespace you used. For -example, if the Rook operator is running in the namespace "my-namespace" the -provisioner value should be "my-namespace.rbd.csi.ceph.com". The same provisioner -name needs to be set in both the storageclass and snapshotclass. - -## Liveness Sidecar - -All CSI pods are deployed with a sidecar container that provides a prometheus metric for tracking if the CSI plugin is alive and running. -These metrics are meant to be collected by prometheus but can be accesses through a GET request to a specific node ip. -for example `curl -X get http://[pod ip]:[liveness-port][liveness-path] 2>/dev/null | grep csi` -the expected output should be - -```console -curl -X GET http://10.109.65.142:9080/metrics 2>/dev/null | grep csi -``` - ->``` -># HELP csi_liveness Liveness Probe -># TYPE csi_liveness gauge ->csi_liveness 1 ->``` - -Check the [monitoring doc](ceph-monitoring.md) to see how to integrate CSI -liveness and grpc metrics into ceph monitoring. - -## Dynamically Expand Volume - -### Prerequisite - -* For filesystem resize to be supported for your Kubernetes cluster, the - kubernetes version running in your cluster should be >= v1.15 and for block - volume resize support the Kubernetes version should be >= v1.16. Also, - `ExpandCSIVolumes` feature gate has to be enabled for the volume resize - functionality to work. - -To expand the PVC the controlling StorageClass must have `allowVolumeExpansion` -set to `true`. `csi.storage.k8s.io/controller-expand-secret-name` and -`csi.storage.k8s.io/controller-expand-secret-namespace` values set in -storageclass. Now expand the PVC by editing the PVC -`pvc.spec.resource.requests.storage` to a higher values than the current size. -Once PVC is expanded on backend and same is reflected size is reflected on -application mountpoint, the status capacity `pvc.status.capacity.storage` of -PVC will be updated to new size. - -## RBD Mirroring - -To support RBD Mirroring, the [Volume Replication Operator](https://github.com/csi-addons/volume-replication-operator/blob/main/README.md) will be started in the RBD provisioner pod. -Volume Replication Operator is a kubernetes operator that provides common and reusable APIs for storage disaster recovery. 
It is based on [csi-addons/spec](https://github.com/csi-addons/spec) specification and can be used by any storage provider. -It follows controller pattern and provides extended APIs for storage disaster recovery. The extended APIs are provided via Custom Resource Definition (CRD). -To enable volume replication: -- For Helm deployments see the [helm settings](helm-operator.md#configuration). -- For non-Helm deployments set `CSI_ENABLE_VOLUME_REPLICATION: "true"` in the operator.yaml diff --git a/Documentation/ceph-csi-snapshot.md b/Documentation/ceph-csi-snapshot.md deleted file mode 100644 index 5ed0560ce..000000000 --- a/Documentation/ceph-csi-snapshot.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: Snapshots -weight: 3250 -indent: true ---- - -## Prerequisites - -- Rook officially supports v1/v1beta1 snapshots for kubernetes v1.17+. - -- Install the snapshot controller and snapshot v1/v1beta1 CRD as required. More info can be found [here](https://github.com/kubernetes-csi/external-snapshotter/tree/v4.0.0#usage). - -Note: If only Alpha snapshots are available, enable snapshotter in `rook-ceph-operator-config` or helm chart `values.yaml`, change the external-snapshotter image to `k8s.gcr.io/sig-storage/csi-snapshotter:v1.2.2` and refer to the [alpha snapshots documentation](https://github.com/rook/rook/blob/release-1.3/Documentation/ceph-csi-drivers.md#rbd-snapshots) - -* We also need a `VolumeSnapshotClass` for volume snapshot to work. The purpose of a `VolumeSnapshotClass` is -defined in [the kubernetes -documentation](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/). -In short, as the documentation describes it: - -> Just like StorageClass provides a way for administrators to describe the -> “classes” of storage they offer when provisioning a volume, -> VolumeSnapshotClass provides a way to describe the “classes” of storage when -> provisioning a volume snapshot. - -## Upgrade Snapshot API - -If your Kubernetes version is updated to a newer version of the snapshot API, follow the upgrade guide [here](https://github.com/kubernetes-csi/external-snapshotter/tree/v4.0.0#upgrade) to upgrade from v1alpha1 to v1beta1, or v1beta1 to v1. - - -## RBD Snapshots - -### VolumeSnapshotClass - -In [VolumeSnapshotClass](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml), -the `csi.storage.k8s.io/snapshotter-secret-name` parameter should reference the -name of the secret created for the rbdplugin and `pool` to reflect the Ceph pool name. - -Update the value of the `clusterID` field to match the namespace that Rook is -running in. When Ceph CSI is deployed by Rook, the operator will automatically -maintain a configmap whose contents will match this key. By default this is -"rook-ceph". - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml -``` - -### Volumesnapshot - -In [snapshot](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml), -`volumeSnapshotClassName` should be the name of the `VolumeSnapshotClass` -previously created. The `persistentVolumeClaimName` should be the name of the -PVC which is already created by the RBD CSI driver. 
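-
-A minimal sketch of the fields described above, assuming the `v1` snapshot API and the `csi-rbdplugin-snapclass` and `rbd-pvc` example names used elsewhere in this guide:
-
-```yaml
-apiVersion: snapshot.storage.k8s.io/v1
-kind: VolumeSnapshot
-metadata:
-  name: rbd-pvc-snapshot
-spec:
-  # Name of the VolumeSnapshotClass created above (example name)
-  volumeSnapshotClassName: csi-rbdplugin-snapclass
-  source:
-    # Existing PVC provisioned by the RBD CSI driver (example name)
-    persistentVolumeClaimName: rbd-pvc
-```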
- -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml -``` - -### Verify RBD Snapshot Creation - -```console -kubectl get volumesnapshotclass -``` - ->``` ->NAME DRIVER DELETIONPOLICY AGE ->csi-rbdplugin-snapclass rook-ceph.rbd.csi.ceph.com Delete 3h55m ->``` - -```console -kubectl get volumesnapshot -``` - ->``` ->NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE ->rbd-pvc-snapshot true rbd-pvc 1Gi csi-rbdplugin-snapclass snapcontent-79090db0-7c66-4b18-bf4a-634772c7cac7 3h50m 3h51m ->``` - -The snapshot will be ready to restore to a new PVC when the `READYTOUSE` field of the -`volumesnapshot` is set to true. - -### Restore the snapshot to a new PVC - -In -[pvc-restore](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml), -`dataSource` should be the name of the `VolumeSnapshot` previously -created. The `dataSource` kind should be the `VolumeSnapshot`. - -Create a new PVC from the snapshot - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml -``` - -### Verify RBD Clone PVC Creation - -```console -kubectl get pvc -``` ->``` ->NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE ->rbd-pvc Bound pvc-84294e34-577a-11e9-b34f-525400581048 1Gi RWO rook-ceph-block 34m ->rbd-pvc-restore Bound pvc-575537bf-577f-11e9-b34f-525400581048 1Gi RWO rook-ceph-block 8s ->``` - -## RBD snapshot resource Cleanup - -To clean your cluster of the resources created by this example, run the following: - -```console -kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml -kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml -kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml -``` - -## CephFS Snapshots - -### VolumeSnapshotClass - -In [VolumeSnapshotClass](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml), -the `csi.storage.k8s.io/snapshotter-secret-name` parameter should reference the -name of the secret created for the cephfsplugin. - -In the volumesnapshotclass, update the value of the `clusterID` field to match the namespace that Rook is -running in. When Ceph CSI is deployed by Rook, the operator will automatically -maintain a configmap whose contents will match this key. By default this is -"rook-ceph". - - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml -``` - -### Volumesnapshot - -In [snapshot](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml), -`volumeSnapshotClassName` should be the name of the `VolumeSnapshotClass` -previously created. The `persistentVolumeClaimName` should be the name of the -PVC which is already created by the CephFS CSI driver. 
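-
-As with RBD, a minimal sketch of these fields, assuming the `v1` snapshot API and the `csi-cephfsplugin-snapclass` and `cephfs-pvc` example names used elsewhere in this guide:
-
-```yaml
-apiVersion: snapshot.storage.k8s.io/v1
-kind: VolumeSnapshot
-metadata:
-  name: cephfs-pvc-snapshot
-spec:
-  # Name of the VolumeSnapshotClass created above (example name)
-  volumeSnapshotClassName: csi-cephfsplugin-snapclass
-  source:
-    # Existing PVC provisioned by the CephFS CSI driver (example name)
-    persistentVolumeClaimName: cephfs-pvc
-```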
- -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml -``` - -### Verify CephFS Snapshot Creation - -```console -kubectl get volumesnapshotclass -``` ->``` ->NAME DRIVER DELETIONPOLICY AGE ->csi-cephfslugin-snapclass rook-ceph.cephfs.csi.ceph.com Delete 3h55m ->``` -```console -kubectl get volumesnapshot -``` - ->``` ->NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE ->cephfs-pvc-snapshot true cephfs-pvc 1Gi csi-cephfsplugin-snapclass snapcontent-34476204-a14a-4d59-bfbc-2bbba695652c 3h50m 3h51m ->``` - -The snapshot will be ready to restore to a new PVC when `READYTOUSE` field of the -`volumesnapshot` is set to true. - -### Restore the snapshot to a new PVC - -In -[pvc-restore](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml), -`dataSource` should be the name of the `VolumeSnapshot` previously -created. The `dataSource` kind should be the `VolumeSnapshot`. - -Create a new PVC from the snapshot - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml -``` - -### Verify CephFS Restore PVC Creation - -```console -kubectl get pvc -``` - ->``` ->NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE ->cephfs-pvc Bound pvc-74734901-577a-11e9-b34f-525400581048 1Gi RWX rook-cephfs 55m ->cephfs-pvc-restore Bound pvc-95308c75-6c93-4928-a551-6b5137192209 1Gi RWX rook-cephfs 34s ->``` - -## CephFS snapshot resource Cleanup - -To clean your cluster of the resources created by this example, run the following: - -```console -kubectl delete -f cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml -kubectl delete -f cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml -kubectl delete -f cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml -``` - -## Limitations - -* There is a limit of 400 snapshots per cephFS filesystem. -* The PVC cannot be deleted if it has snapshots. make sure all the snapshots on the PVC are deleted before you delete the PVC. diff --git a/Documentation/ceph-csi-troubleshooting.md b/Documentation/ceph-csi-troubleshooting.md deleted file mode 100644 index d27abee4f..000000000 --- a/Documentation/ceph-csi-troubleshooting.md +++ /dev/null @@ -1,443 +0,0 @@ ---- -title: CSI Common Issues -weight: 11125 -indent: true ---- - -# CSI Common Issues - -Issues when provisioning volumes with the Ceph CSI driver can happen for many reasons such as: - -- Network connectivity between CSI pods and ceph -- Cluster health issues -- Slow operations -- Kubernetes issues -- Ceph-CSI configuration or bugs - -The following troubleshooting steps can help identify a number of issues. - -### Block (RBD) - -If you are mounting block volumes (usually RWO), these are referred to as `RBD` volumes in Ceph. -See the sections below for RBD if you are having block volume issues. - -### Shared Filesystem (CephFS) - -If you are mounting shared filesystem volumes (usually RWX), these are referred to as `CephFS` volumes in Ceph. -See the sections below for CephFS if you are having filesystem volume issues. - -## Network Connectivity - -The Ceph monitors are the most critical component of the cluster to check first. 
-Retrieve the mon endpoints from the services: - -```console -kubectl -n rook-ceph get svc -l app=rook-ceph-mon -``` - ->``` ->NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ->rook-ceph-mon-a ClusterIP 10.104.165.31 6789/TCP,3300/TCP 18h ->rook-ceph-mon-b ClusterIP 10.97.244.93 6789/TCP,3300/TCP 21s ->rook-ceph-mon-c ClusterIP 10.99.248.163 6789/TCP,3300/TCP 8s ->``` - -If host networking is enabled in the CephCluster CR, you will instead need to find the -node IPs for the hosts where the mons are running. - -The `clusterIP` is the mon IP and `3300` is the port that will be used by Ceph-CSI to connect to the ceph cluster. -These endpoints must be accessible by all clients in the cluster, including the CSI driver. - -If you are seeing issues provisioning the PVC then you need to check the network connectivity from the provisioner pods. - -- For CephFS PVCs, check network connectivity from the `csi-cephfsplugin` container of the `csi-cephfsplugin-provisioner` pods -- For Block PVCs, check network connectivity from the `csi-rbdplugin` container of the `csi-rbdplugin-provisioner` pods - -For redundancy, there are two provisioner pods for each type. Make sure to test connectivity from all provisioner pods. - -Connect to the provisioner pods and verify the connection to the mon endpoints such as the following: - -```console -# Connect to the csi-cephfsplugin container in the provisioner pod -kubectl -n rook-ceph exec -ti deploy/csi-cephfsplugin-provisioner -c csi-cephfsplugin -- bash - -# Test the network connection to the mon endpoint -curl 10.104.165.31:3300 2>/dev/null -ceph v2 -``` - -If you see the response "ceph v2", the connection succeeded. -If there is no response then there is a network issue connecting to the ceph cluster. - -Check network connectivity for all monitor IP’s and ports which are passed to ceph-csi. - -## Ceph Health - -Sometimes an unhealthy Ceph cluster can contribute to the issues in creating or mounting the PVC. -Check that your Ceph cluster is healthy by connecting to the [Toolbox](ceph-toolbox.md) and -running the `ceph` commands: - -```console -ceph health detail -``` - -```console -HEALTH_OK -``` - -## Slow Operations - -Even slow ops in the ceph cluster can contribute to the issues. In the toolbox, -make sure that no slow ops are present and the ceph cluster is healthy - -```console -ceph -s -``` ->``` ->cluster: -> id: ba41ac93-3b55-4f32-9e06-d3d8c6ff7334 -> health: HEALTH_WARN -> 30 slow ops, oldest one blocked for 10624 sec, mon.a has slow ops ->``` - -If Ceph is not healthy, check the following health for more clues: - -- The Ceph monitor logs for errors -- The OSD logs for errors -- Disk Health -- Network Health - -## Ceph Troubleshooting - -### Check if the RBD Pool exists - -Make sure the pool you have specified in the `storageclass.yaml` exists in the ceph cluster. - -Suppose the pool name mentioned in the `storageclass.yaml` is `replicapool`. It can be verified -to exist in the toolbox: - -```console -ceph osd lspools -``` - ->``` ->1 device_health_metrics ->2 replicapool ->``` - -If the pool is not in the list, create the `CephBlockPool` CR for the pool if you have not already. -If you have already created the pool, check the Rook operator log for errors creating the pool. - -### Check if the Filesystem exists - -For the shared filesystem (CephFS), check that the filesystem and pools you have specified in the `storageclass.yaml` exist in the Ceph cluster. - -Suppose the `fsName` name mentioned in the `storageclass.yaml` is `myfs`. 
It can be verified in the toolbox: - -```console -ceph fs ls -``` ->``` ->name: myfs, metadata pool: myfs-metadata, data pools: [myfs-data0 ] ->``` - -Now verify the `pool` mentioned in the `storageclass.yaml` exists, such as the example `myfs-data0`. - -```console -ceph osd lspools -``` - ->``` ->1 device_health_metrics ->2 replicapool ->3 myfs-metadata0 ->4 myfs-data0 ->``` - -The pool for the filesystem will have the suffix `-data0` compared the filesystem name that is created -by the CephFilesystem CR. - -### subvolumegroups - -If the subvolumegroup is not specified in the ceph-csi configmap (where you have passed the ceph monitor information), -Ceph-CSI creates the default subvolumegroup with the name csi. Verify that the subvolumegroup -exists: - -```console -ceph fs subvolumegroup ls myfs -``` - ->``` ->[ -> { -> "name": "csi" -> } ->] ->``` - -If you don’t see any issues with your Ceph cluster, the following sections will start debugging the issue from the CSI side. - -## Provisioning Volumes - -At times the issue can also exist in the Ceph-CSI or the sidecar containers used in Ceph-CSI. - -Ceph-CSI has included number of sidecar containers in the provisioner pods such as: -`csi-attacher`, `csi-resizer`, `csi-provisioner`, `csi-cephfsplugin`, `csi-snapshotter`, and `liveness-prometheus`. - -The CephFS provisioner core CSI driver container name is `csi-cephfsplugin` as one of the container names. -For the RBD (Block) provisioner you will see `csi-rbdplugin` as the container name. - -Here is a summary of the sidecar containers: - -### csi-provisioner - -The external-provisioner is a sidecar container that dynamically provisions volumes by calling `ControllerCreateVolume()` -and `ControllerDeleteVolume()` functions of CSI drivers. More details about external-provisioner can be found here. - -If there is an issue with PVC Create or Delete, check the logs of the `csi-provisioner` sidecar container. - -```console -kubectl -n rook-ceph logs deploy/csi-rbdplugin-provisioner -c csi-provisioner -``` - -### csi-resizer - -The CSI `external-resizer` is a sidecar container that watches the Kubernetes API server for PersistentVolumeClaim -updates and triggers `ControllerExpandVolume` operations against a CSI endpoint if the user requested more storage -on the PersistentVolumeClaim object. More details about external-provisioner can be found here. - -If any issue exists in PVC expansion you can check the logs of the `csi-resizer` sidecar container. - -```console -kubectl -n rook-ceph logs deploy/csi-rbdplugin-provisioner -c csi-resizer -``` - -### csi-snapshotter - -The CSI external-snapshotter sidecar only watches for `VolumeSnapshotContent` create/update/delete events. -It will talk to ceph-csi containers to create or delete snapshots. More details about external-snapshotter can -be found [here](https://github.com/kubernetes-csi/external-snapshotter). - -**In Kubernetes 1.17 the volume snapshot feature was promoted to beta. In Kubernetes 1.20, the feature gate is enabled by default on standard Kubernetes deployments and cannot be turned off.** - -Make sure you have installed the correct snapshotter CRD version. If you have not installed the snapshotter -controller, see the [Snapshots guide](ceph-csi-snapshot.md). 
-
-```console
-kubectl get crd | grep snapshot
-```
-
->```
->volumesnapshotclasses.snapshot.storage.k8s.io     2021-01-25T11:19:38Z
->volumesnapshotcontents.snapshot.storage.k8s.io    2021-01-25T11:19:39Z
->volumesnapshots.snapshot.storage.k8s.io           2021-01-25T11:19:40Z
->```
-
-The above CRDs must have the matching version in your `snapshotclass.yaml` or `snapshot.yaml`.
-Otherwise, the `VolumeSnapshot` and `VolumeSnapshotContent` will not be created.
-
-The snapshot controller is responsible for creating both the `VolumeSnapshot` and
-`VolumeSnapshotContent` objects. If the objects are not getting created, you may need to
-check the logs of the snapshot-controller container.
-
-Rook only installs the snapshotter sidecar container, not the controller. It is recommended
-that Kubernetes distributors bundle and deploy the controller and CRDs as part of their Kubernetes cluster
-management process (independent of any CSI Driver).
-
-If your Kubernetes distribution does not bundle the snapshot controller, you may manually install these components.
-
-If any issue exists in the snapshot Create/Delete operation, you can check the logs of the csi-snapshotter sidecar container.
-
-```console
-kubectl -n rook-ceph logs deploy/csi-rbdplugin-provisioner -c csi-snapshotter
-```
-
-If you see an error such as:
-
->```
->GRPC error: rpc error: code = Aborted desc = an operation with the given Volume ID
->0001-0009-rook-ceph-0000000000000001-8d0ba728-0e17-11eb-a680-ce6eecc894de already
->exists.
->```
-
-The issue typically lies in the Ceph cluster or in network connectivity. If the issue is
-in provisioning the PVC, restarting the provisioner pods can help: for CephFS,
-restart the `csi-cephfsplugin-provisioner-xxxxxx` pod; for RBD, restart
-the `csi-rbdplugin-provisioner-xxxxxx` pod. If the issue is in mounting the PVC,
-restart the `csi-rbdplugin-xxxxx` pod (for RBD) or the `csi-cephfsplugin-xxxxx` pod
-(for CephFS).
-
-## Mounting the volume to application pods
-
-When a user creates an application pod that uses a PVC, there is a three-step process:
-
-- CSI driver registration
-- Create volume attachment object
-- Stage and publish the volume
-
-### csi-driver registration
-
-`csi-cephfsplugin-xxxx` or `csi-rbdplugin-xxxx` is a daemonset pod running on all the nodes
-where your application can be scheduled. If the plugin pod is not running on the node where
-your application is scheduled, it can cause mount issues, so make sure the plugin pods are always running.
-
-Each plugin pod has two important containers: `driver-registrar` and either `csi-rbdplugin` or
-`csi-cephfsplugin`. Sometimes there is also a `liveness-prometheus` container.
-
-### driver-registrar
-
-The node-driver-registrar is a sidecar container that registers the CSI driver with Kubelet.
-More details can be found [here](https://github.com/kubernetes-csi/node-driver-registrar).
-
-If any issue exists in attaching the PVC to the application pod, check the logs of the `driver-registrar`
-sidecar container in the plugin pod on the node where your application pod is scheduled.
- -```console -kubectl -n rook-ceph logs deploy/csi-rbdplugin -c driver-registrar -``` - ->``` ->I0120 12:28:34.231761 124018 main.go:112] Version: v2.0.1 ->I0120 12:28:34.233910 124018 connection.go:151] Connecting to unix:///csi/csi.sock ->I0120 12:28:35.242469 124018 node_register.go:55] Starting Registration Server at: /registration/rook-ceph.rbd.csi.ceph.com-reg.sock ->I0120 12:28:35.243364 124018 node_register.go:64] Registration Server started at: /registration/rook-ceph.rbd.csi.ceph.com-reg.sock ->I0120 12:28:35.243673 124018 node_register.go:86] Skipping healthz server because port set to: 0 ->I0120 12:28:36.318482 124018 main.go:79] Received GetInfo call: &InfoRequest{} ->I0120 12:28:37.455211 124018 main.go:89] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:true,Error:,} ->E0121 05:19:28.658390 124018 connection.go:129] Lost connection to unix:///csi/csi.sock. ->E0125 07:11:42.926133 124018 connection.go:129] Lost connection to unix:///csi/csi.sock. ->``` - -You should see the response `RegistrationStatus{PluginRegistered:true,Error:,}` in the logs to -confirm that plugin is registered with kubelet. - -If you see a driver not found an error in the application pod describe output. -Restarting the `csi-xxxxplugin-xxx` pod on the node may help. - -## Volume Attachment - -Each provisioner pod also has a sidecar container called `csi-attacher`. - -### csi-attacher - -The external-attacher is a sidecar container that attaches volumes to nodes by calling `ControllerPublish` and -`ControllerUnpublish` functions of CSI drivers. It is necessary because the internal Attach/Detach controller -running in Kubernetes controller-manager does not have any direct interfaces to CSI drivers. More details can -be found [here](https://github.com/kubernetes-csi/external-attacher). - -If any issue exists in attaching the PVC to the application pod first check the volumettachment object created -and also log from csi-attacher sidecar container in provisioner pod. - -```console -kubectl get volumeattachment -``` - ->``` ->NAME ATTACHER PV NODE ATTACHED AGE ->csi-75903d8a902744853900d188f12137ea1cafb6c6f922ebc1c116fd58e950fc92 rook-ceph.cephfs.csi.ceph.com pvc-5c547d2a-fdb8-4cb2-b7fe-e0f30b88d454 minikube true 4m26s ->``` - -```console -kubectl logs po/csi-rbdplugin-provisioner-d857bfb5f-ddctl -c csi-attacher -``` - -## CephFS Stale operations - -Check for any stale mount commands on the `csi-cephfsplugin-xxxx` pod on the node where your application pod is scheduled. - -You need to exec in the `csi-cephfsplugin-xxxx` pod and grep for stale mount operators. - -Identify the `csi-cephfsplugin-xxxx` pod running on the node where your application is scheduled with -`kubectl get po -o wide` and match the node names. - -```console -kubectl exec -it csi-cephfsplugin-tfk2g -c csi-cephfsplugin -- sh -ps -ef |grep mount - -root 67 60 0 11:55 pts/0 00:00:00 grep mount -``` - -```console -ps -ef |grep ceph - -root 1 0 0 Jan20 ? 00:00:26 /usr/local/bin/cephcsi --nodeid=minikube --type=cephfs --endpoint=unix:///csi/csi.sock --v=0 --nodeserver=true --drivername=rook-ceph.cephfs.csi.ceph.com --pidlimit=-1 --metricsport=9091 --forcecephkernelclient=true --metricspath=/metrics --enablegrpcmetrics=true -root 69 60 0 11:55 pts/0 00:00:00 grep ceph -``` - -If any commands are stuck check the **dmesg** logs from the node. -Restarting the `csi-cephfsplugin` pod may also help sometimes. - -If you don’t see any stuck messages, confirm the network connectivity, Ceph health, and slow ops. 
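-
-If a plugin restart is needed, one approach (a sketch, reusing the example pod name from above) is to delete the plugin pod on the affected node and let the DaemonSet recreate it:
-
-```console
-# The DaemonSet will automatically start a replacement pod on the same node
-kubectl -n rook-ceph delete pod csi-cephfsplugin-tfk2g
-```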
- -## RBD Stale operations - -Check for any stale `map/mkfs/mount` commands on the `csi-rbdplugin-xxxx` pod on the node where your application pod is scheduled. - -You need to exec in the `csi-rbdplugin-xxxx` pod and grep for stale operators like (`rbd map, rbd unmap, mkfs, mount` and `umount`). - -Identify the `csi-rbdplugin-xxxx` pod running on the node where your application is scheduled with -`kubectl get po -o wide` and match the node names. - -```console -kubectl exec -it csi-rbdplugin-vh8d5 -c csi-rbdplugin -- sh -``` -```console -ps -ef |grep map -``` ->``` ->root 1297024 1296907 0 12:00 pts/0 00:00:00 grep map ->``` - -```console -ps -ef |grep mount -``` ->``` ->root 1824 1 0 Jan19 ? 00:00:00 /usr/sbin/rpc.mountd ->ceph 1041020 1040955 1 07:11 ? 00:03:43 ceph-mgr --fsid=ba41ac93-3b55-4f32-9e06-d3d8c6ff7334 --keyring=/etc/ceph/keyring-store/keyring --log-to-stderr=true --err-to-stderr=true --mon-cluster-log-to-stderr=true --log-stderr-prefix=debug --default-log-to-file=false --default-mon-cluster-log-to-file=false --mon-host=[v2:10.111.136.166:3300,v1:10.111.136.166:6789] --mon-initial-members=a --id=a --setuser=ceph --setgroup=ceph --client-mount-uid=0 --client-mount-gid=0 --foreground --public-addr=172.17.0.6 ->root 1297115 1296907 0 12:00 pts/0 00:00:00 grep mount ->``` - -```console -ps -ef |grep mkfs -``` ->``` ->root 1297291 1296907 0 12:00 pts/0 00:00:00 grep mkfs ->``` - -```console -ps -ef |grep umount -``` ->``` ->root 1298500 1296907 0 12:01 pts/0 00:00:00 grep umount ->``` - -```console -ps -ef |grep unmap -``` ->``` ->root 1298578 1296907 0 12:01 pts/0 00:00:00 grep unmap ->``` - -If any commands are stuck check the **dmesg** logs from the node. -Restarting the `csi-rbdplugin` pod also may help sometimes. - -If you don’t see any stuck messages, confirm the network connectivity, Ceph health, and slow ops. - -## dmesg logs - -Check the dmesg logs on the node where pvc mounting is failing or the `csi-rbdplugin` container of the -`csi-rbdplugin-xxxx` pod on that node. - -```console -dmesg -``` - -## RBD Commands - -If nothing else helps, get the last executed command from the ceph-csi pod logs and run it manually inside -the provisioner or plugin pod to see if there are errors returned even if they couldn't be seen in the logs. - -```console -$ rbd ls --id=csi-rbd-node -m=10.111.136.166:6789 --key=AQDpIQhg+v83EhAAgLboWIbl+FL/nThJzoI3Fg== -``` - -Where `-m` is one of the mon endpoints and the `--key` is the key used by the CSI driver for accessing the Ceph cluster. diff --git a/Documentation/ceph-csi-volume-clone.md b/Documentation/ceph-csi-volume-clone.md deleted file mode 100644 index 73585634b..000000000 --- a/Documentation/ceph-csi-volume-clone.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: Volume clone -weight: 3250 -indent: true ---- - -The CSI Volume Cloning feature adds support for specifying existing PVCs in the -`dataSource` field to indicate a user would like to clone a Volume. - -A Clone is defined as a duplicate of an existing Kubernetes Volume that can be -consumed as any standard Volume would be. The only difference is that upon -provisioning, rather than creating a "new" empty Volume, the back end device -creates an exact duplicate of the specified Volume. - -Refer to [clone-doc](https://kubernetes.io/docs/concepts/storage/volume-pvc-datasource/) -for more info. - -## RBD Volume Cloning - -### Volume Clone Prerequisites - - 1. Requires Kubernetes v1.16+ which supports volume clone. - 2. Ceph-csi diver v3.0.0+ which supports volume clone. 
- -### Volume Cloning - -In -[pvc-clone](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml), -`dataSource` should be the name of the `PVC` which is already created by RBD -CSI driver. The `dataSource` kind should be the `PersistentVolumeClaim` and also storageclass -should be same as the source `PVC`. - -Create a new PVC Clone from the PVC - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml -``` - -### Verify RBD volume Clone PVC Creation - -```console -kubectl get pvc -``` - ->``` ->NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE ->rbd-pvc Bound pvc-74734901-577a-11e9-b34f-525400581048 1Gi >RWO rook-ceph-block 34m ->rbd-pvc-clone Bound pvc-70473135-577f-11e9-b34f-525400581048 1Gi RWO rook-ceph-block 8s ->``` - -## RBD clone resource Cleanup - -To clean your cluster of the resources created by this example, run the following: - -```console -kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml -``` - -## CephFS Volume Cloning - -### Volume Clone Prerequisites - - 1. Requires Kubernetes v1.16+ which supports volume clone. - 2. Ceph-csi diver v3.1.0+ which supports volume clone. - -### Volume Cloning - -In -[pvc-clone](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml), -`dataSource` should be the name of the `PVC` which is already created by CephFS -CSI driver. The `dataSource` kind should be the `PersistentVolumeClaim` and also storageclass -should be same as the source `PVC`. - -Create a new PVC Clone from the PVC - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml -``` - -### Verify CephFS volume Clone PVC Creation - -```console -kubectl get pvc -``` - ->``` ->NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE ->cephfs-pvc Bound pvc-1ea51547-a88b-4ab0-8b4a-812caeaf025d 1Gi RWX rook-cephfs 39m ->cephfs-pvc-clone Bound pvc-b575bc35-d521-4c41-b4f9-1d733cd28fdf 1Gi RWX rook-cephfs 8s ->``` - -## CephFS clone resource Cleanup - -To clean your cluster of the resources created by this example, run the following: - -```console -kubectl delete -f cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml -``` diff --git a/Documentation/ceph-dashboard.md b/Documentation/ceph-dashboard.md deleted file mode 100755 index 8f7490654..000000000 --- a/Documentation/ceph-dashboard.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -title: Ceph Dashboard -weight: 2400 -indent: true ---- - -# Ceph Dashboard - -The dashboard is a very helpful tool to give you an overview of the status of your Ceph cluster, including overall health, -status of the mon quorum, status of the mgr, osd, and other Ceph daemons, view pools and PG status, show logs for the daemons, -and more. Rook makes it simple to enable the dashboard. - -![The Ceph dashboard](media/ceph-dashboard.png) - -## Enable the Ceph Dashboard - -The [dashboard](https://docs.ceph.com/en/latest/mgr/dashboard/) can be enabled with settings in the CephCluster CRD. The CephCluster CRD must have the dashboard `enabled` setting set to `true`. -This is the default setting in the example manifests. - -```yaml - spec: - dashboard: - enabled: true -``` - -The Rook operator will enable the ceph-mgr dashboard module. A service object will be created to expose that port inside the Kubernetes cluster. Rook will -enable port 8443 for https access. - -This example shows that port 8443 was configured. 
- -```console -kubectl -n rook-ceph get service -``` - ->``` ->NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ->rook-ceph-mgr ClusterIP 10.108.111.192 9283/TCP 3h ->rook-ceph-mgr-dashboard ClusterIP 10.110.113.240 8443/TCP 3h ->``` - -The first service is for reporting the [Prometheus metrics](ceph-monitoring.md), while the latter service is for the dashboard. -If you are on a node in the cluster, you will be able to connect to the dashboard by using either the -DNS name of the service at `https://rook-ceph-mgr-dashboard-https:8443` or by connecting to the cluster IP, -in this example at `https://10.110.113.240:8443`. - -> **IMPORTANT:** Please note the dashboard will only be enabled for the first Ceph object store created by Rook. - -### Login Credentials - -After you connect to the dashboard you will need to login for secure access. Rook creates a default user named -`admin` and generates a secret called `rook-ceph-dashboard-admin-password` in the namespace where the Rook Ceph cluster is running. -To retrieve the generated password, you can run the following: - -```console -kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo -``` - -## Configure the Dashboard - -The following dashboard configuration settings are supported: - -```yaml - spec: - dashboard: - urlPrefix: /ceph-dashboard - port: 8443 - ssl: true -``` - -* `urlPrefix` If you are accessing the dashboard via a reverse proxy, you may - wish to serve it under a URL prefix. To get the dashboard to use hyperlinks - that include your prefix, you can set the `urlPrefix` setting. -* `port` The port that the dashboard is served on may be changed from the - default using the `port` setting. The corresponding K8s service exposing the - port will automatically be updated. -* `ssl` The dashboard may be served without SSL (useful for when you deploy the - dashboard behind a proxy already served using SSL) by setting the `ssl` option - to be false. - -## Viewing the Dashboard External to the Cluster - -Commonly you will want to view the dashboard from outside the cluster. For example, on a development machine with the -cluster running inside minikube you will want to access the dashboard from the host. - -There are several ways to expose a service that will depend on the environment you are running in. -You can use an [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress/) or [other methods](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) for exposing services such as -NodePort, LoadBalancer, or ExternalIPs. - -### Node Port - -The simplest way to expose the service in minikube or similar environment is using the NodePort to open a port on the -VM that can be accessed by the host. To create a service with the NodePort, save this yaml as `dashboard-external-https.yaml`. 
- -```yaml -apiVersion: v1 -kind: Service -metadata: - name: rook-ceph-mgr-dashboard-external-https - namespace: rook-ceph - labels: - app: rook-ceph-mgr - rook_cluster: rook-ceph -spec: - ports: - - name: dashboard - port: 8443 - protocol: TCP - targetPort: 8443 - selector: - app: rook-ceph-mgr - rook_cluster: rook-ceph - sessionAffinity: None - type: NodePort -``` - -Now create the service: - -```console -kubectl create -f dashboard-external-https.yaml -``` - -You will see the new service `rook-ceph-mgr-dashboard-external-https` created: - -```console -kubectl -n rook-ceph get service -``` - ->``` ->NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ->rook-ceph-mgr ClusterIP 10.108.111.192 9283/TCP 4h ->rook-ceph-mgr-dashboard ClusterIP 10.110.113.240 8443/TCP 4h ->rook-ceph-mgr-dashboard-external-https NodePort 10.101.209.6 8443:31176/TCP 4h ->``` - -In this example, port `31176` will be opened to expose port `8443` from the ceph-mgr pod. Find the ip address -of the VM. If using minikube, you can run `minikube ip` to find the ip address. -Now you can enter the URL in your browser such as `https://192.168.99.110:31176` and the dashboard will appear. - -### Load Balancer - -If you have a cluster on a cloud provider that supports load balancers, -you can create a service that is provisioned with a public hostname. -The yaml is the same as `dashboard-external-https.yaml` except for the following property: - -```yaml -spec: -[...] - type: LoadBalancer -``` - -Now create the service: - -```console -kubectl create -f dashboard-loadbalancer.yaml -``` - -You will see the new service `rook-ceph-mgr-dashboard-loadbalancer` created: - -```console -kubectl -n rook-ceph get service -``` - ->``` ->NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ->rook-ceph-mgr ClusterIP 172.30.11.40 9283/TCP 4h ->rook-ceph-mgr-dashboard ClusterIP 172.30.203.185 8443/TCP 4h ->rook-ceph-mgr-dashboard-loadbalancer LoadBalancer 172.30.27.242 a7f23e8e2839511e9b7a5122b08f2038-1251669398.us-east-1.elb.amazonaws.com 8443:32747/TCP 4h ->``` - -Now you can enter the URL in your browser such as `https://a7f23e8e2839511e9b7a5122b08f2038-1251669398.us-east-1.elb.amazonaws.com:8443` and the dashboard will appear. - -### Ingress Controller - -If you have a cluster with an [nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) -and a Certificate Manager (e.g. [cert-manager](https://cert-manager.readthedocs.io/)) then you can create an -Ingress like the one below. This example achieves four things: - -1. Exposes the dashboard on the Internet (using an reverse proxy) -2. Issues an valid TLS Certificate for the specified domain name (using [ACME](https://en.wikipedia.org/wiki/Automated_Certificate_Management_Environment)) -3. Tells the reverse proxy that the dashboard itself uses HTTPS -4. 
Tells the reverse proxy that the dashboard itself does not have a valid certificate (it is self-signed) - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: rook-ceph-mgr-dashboard - namespace: rook-ceph - annotations: - kubernetes.io/ingress.class: "nginx" - kubernetes.io/tls-acme: "true" - nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - nginx.ingress.kubernetes.io/server-snippet: | - proxy_ssl_verify off; -spec: - tls: - - hosts: - - rook-ceph.example.com - secretName: rook-ceph.example.com - rules: - - host: rook-ceph.example.com - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: rook-ceph-mgr-dashboard - port: - name: https-dashboard -``` - -Customise the Ingress resource to match your cluster. Replace the example domain name `rook-ceph.example.com` -with a domain name that will resolve to your Ingress Controller (creating the DNS entry if required). - -Now create the Ingress: - -```console -kubectl create -f dashboard-ingress-https.yaml -``` - -You will see the new Ingress `rook-ceph-mgr-dashboard` created: - -```console -kubectl -n rook-ceph get ingress -``` - ->``` ->NAME HOSTS ADDRESS PORTS AGE ->rook-ceph-mgr-dashboard rook-ceph.example.com 80, 443 5m ->``` - -And the new Secret for the TLS certificate: - -```console -kubectl -n rook-ceph get secret rook-ceph.example.com -``` - ->``` ->NAME TYPE DATA AGE ->rook-ceph.example.com kubernetes.io/tls 2 4m ->``` - -You can now browse to `https://rook-ceph.example.com/` to log into the dashboard. diff --git a/Documentation/ceph-disaster-recovery.md b/Documentation/ceph-disaster-recovery.md deleted file mode 100644 index 0d230e986..000000000 --- a/Documentation/ceph-disaster-recovery.md +++ /dev/null @@ -1,452 +0,0 @@ ---- -title: Disaster Recovery -weight: 11600 -indent: true ---- - -# Disaster Recovery - -Under extenuating circumstances, steps may be necessary to recover the cluster health. There are several types of recovery addressed in this document: -* [Restoring Mon Quorum](#restoring-mon-quorum) -* [Restoring CRDs After Deletion](#restoring-crds-after-deletion) -* [Adopt an existing Rook Ceph cluster into a new Kubernetes cluster](#adopt-an-existing-rook-ceph-cluster-into-a-new-kubernetes-cluster) -* [Backing up and restoring a cluster based on PVCs into a new Kubernetes cluster](#backing-up-and-restoring-a-cluster-based-on-pvcs-into-a-new-kubernetes-cluster) - -## Restoring Mon Quorum - -Under extenuating circumstances, the mons may lose quorum. If the mons cannot form quorum again, -there is a manual procedure to get the quorum going again. The only requirement is that at least one mon -is still healthy. The following steps will remove the unhealthy -mons from quorum and allow you to form a quorum again with a single mon, then grow the quorum back to the original size. - -For example, if you have three mons and lose quorum, you will need to remove the two bad mons from quorum, notify the good mon -that it is the only mon in quorum, and then restart the good mon. - -### Stop the operator - -First, stop the operator so it will not try to failover the mons while we are modifying the monmap - -```console -kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=0 -``` - -### Inject a new monmap - -> **WARNING**: Injecting a monmap must be done very carefully. If run incorrectly, your cluster could be permanently destroyed. - -The Ceph monmap keeps track of the mon quorum. We will update the monmap to only contain the healthy mon. 
-In this example, the healthy mon is `rook-ceph-mon-b`, while the unhealthy mons are `rook-ceph-mon-a` and `rook-ceph-mon-c`. - -Take a backup of the current `rook-ceph-mon-b` Deployment: - -```console -kubectl -n rook-ceph get deployment rook-ceph-mon-b -o yaml > rook-ceph-mon-b-deployment.yaml -``` - -Open the file and copy the `command` and `args` from the `mon` container (see `containers` list). This is needed for the monmap changes. -Cleanup the copied `command` and `args` fields to form a pastable command. -Example: - -The following parts of the `mon` container: - -```yaml -[...] - containers: - - args: - - --fsid=41a537f2-f282-428e-989f-a9e07be32e47 - - --keyring=/etc/ceph/keyring-store/keyring - - --log-to-stderr=true - - --err-to-stderr=true - - --mon-cluster-log-to-stderr=true - - '--log-stderr-prefix=debug ' - - --default-log-to-file=false - - --default-mon-cluster-log-to-file=false - - --mon-host=$(ROOK_CEPH_MON_HOST) - - --mon-initial-members=$(ROOK_CEPH_MON_INITIAL_MEMBERS) - - --id=b - - --setuser=ceph - - --setgroup=ceph - - --foreground - - --public-addr=10.100.13.242 - - --setuser-match-path=/var/lib/ceph/mon/ceph-b/store.db - - --public-bind-addr=$(ROOK_POD_IP) - command: - - ceph-mon -[...] -``` - -Should be made into a command like this: (**do not copy the example command!**) - -```console -ceph-mon \ - --fsid=41a537f2-f282-428e-989f-a9e07be32e47 \ - --keyring=/etc/ceph/keyring-store/keyring \ - --log-to-stderr=true \ - --err-to-stderr=true \ - --mon-cluster-log-to-stderr=true \ - --log-stderr-prefix=debug \ - --default-log-to-file=false \ - --default-mon-cluster-log-to-file=false \ - --mon-host=$ROOK_CEPH_MON_HOST \ - --mon-initial-members=$ROOK_CEPH_MON_INITIAL_MEMBERS \ - --id=b \ - --setuser=ceph \ - --setgroup=ceph \ - --foreground \ - --public-addr=10.100.13.242 \ - --setuser-match-path=/var/lib/ceph/mon/ceph-b/store.db \ - --public-bind-addr=$ROOK_POD_IP -``` - -(be sure to remove the single quotes around the `--log-stderr-prefix` flag and the parenthesis around the variables being passed ROOK_CEPH_MON_HOST, ROOK_CEPH_MON_INITIAL_MEMBERS and ROOK_POD_IP ) - -Patch the `rook-ceph-mon-b` Deployment to stop this mon working without deleting the mon pod: - -```console -kubectl -n rook-ceph patch deployment rook-ceph-mon-b --type='json' -p '[{"op":"remove", "path":"/spec/template/spec/containers/0/livenessProbe"}]' - -kubectl -n rook-ceph patch deployment rook-ceph-mon-b -p '{"spec": {"template": {"spec": {"containers": [{"name": "mon", "command": ["sleep", "infinity"], "args": []}]}}}}' -``` - -Connect to the pod of a healthy mon and run the following commands. 
- -```console -kubectl -n rook-ceph exec -it bash -``` ->``` -># set a few simple variables ->cluster_namespace=rook-ceph ->good_mon_id=b ->monmap_path=/tmp/monmap -> -># extract the monmap to a file, by pasting the ceph mon command -># from the good mon deployment and adding the -># `--extract-monmap=${monmap_path}` flag ->ceph-mon \ -> --fsid=41a537f2-f282-428e-989f-a9e07be32e47 \ -> --keyring=/etc/ceph/keyring-store/keyring \ -> --log-to-stderr=true \ -> --err-to-stderr=true \ -> --mon-cluster-log-to-stderr=true \ -> --log-stderr-prefix=debug \ -> --default-log-to-file=false \ -> --default-mon-cluster-log-to-file=false \ -> --mon-host=$ROOK_CEPH_MON_HOST \ -> --mon-initial-members=$ROOK_CEPH_MON_INITIAL_MEMBERS \ -> --id=b \ -> --setuser=ceph \ -> --setgroup=ceph \ -> --foreground \ -> --public-addr=10.100.13.242 \ -> --setuser-match-path=/var/lib/ceph/mon/ceph-b/store.db \ -> --public-bind-addr=$ROOK_POD_IP \ -> --extract-monmap=${monmap_path} -> -># review the contents of the monmap ->monmaptool --print /tmp/monmap -> -># remove the bad mon(s) from the monmap ->monmaptool ${monmap_path} --rm -> -># in this example we remove mon0 and mon2: ->monmaptool ${monmap_path} --rm a ->monmaptool ${monmap_path} --rm c -> -># inject the modified monmap into the good mon, by pasting -># the ceph mon command and adding the -># `--inject-monmap=${monmap_path}` flag, like this ->ceph-mon \ -> --fsid=41a537f2-f282-428e-989f-a9e07be32e47 \ -> --keyring=/etc/ceph/keyring-store/keyring \ -> --log-to-stderr=true \ -> --err-to-stderr=true \ -> --mon-cluster-log-to-stderr=true \ -> --log-stderr-prefix=debug \ -> --default-log-to-file=false \ -> --default-mon-cluster-log-to-file=false \ -> --mon-host=$ROOK_CEPH_MON_HOST \ -> --mon-initial-members=$ROOK_CEPH_MON_INITIAL_MEMBERS \ -> --id=b \ -> --setuser=ceph \ -> --setgroup=ceph \ -> --foreground \ -> --public-addr=10.100.13.242 \ -> --setuser-match-path=/var/lib/ceph/mon/ceph-b/store.db \ -> --public-bind-addr=$ROOK_POD_IP \ -> --inject-monmap=${monmap_path} ->``` - -Exit the shell to continue. - -### Edit the Rook configmaps - -Edit the configmap that the operator uses to track the mons. - -```console -kubectl -n rook-ceph edit configmap rook-ceph-mon-endpoints -``` - -In the `data` element you will see three mons such as the following (or more depending on your `moncount`): - -```yaml -data: a=10.100.35.200:6789;b=10.100.13.242:6789;c=10.100.35.12:6789 -``` - -Delete the bad mons from the list, for example to end up with a single good mon: - -```yaml -data: b=10.100.13.242:6789 -``` - -Save the file and exit. - -Now we need to adapt a Secret which is used for the mons and other components. -The following `kubectl patch` command is an easy way to do that. In the end it patches the `rook-ceph-config` secret and updates the two key/value pairs `mon_host` and `mon_initial_members`. - -```console -mon_host=$(kubectl -n rook-ceph get svc rook-ceph-mon-b -o jsonpath='{.spec.clusterIP}') -kubectl -n rook-ceph patch secret rook-ceph-config -p '{"stringData": {"mon_host": "[v2:'"${mon_host}"':3300,v1:'"${mon_host}"':6789]", "mon_initial_members": "'"${good_mon_id}"'"}}' -``` - -> **NOTE**: If you are using `hostNetwork: true`, you need to replace the `mon_host` var with the node IP the mon is pinned to (`nodeSelector`). This is because there is no `rook-ceph-mon-*` service created in that "mode". - -### Restart the mon - -You will need to "restart" the good mon pod with the original `ceph-mon` command to pick up the changes. 
For this run `kubectl replace` on the backup of the mon deployment yaml: - -```console -kubectl replace --force -f rook-ceph-mon-b-deployment.yaml -``` - -> **NOTE**: Option `--force` will delete the deployment and create a new one - -Start the rook [toolbox](/Documentation/ceph-toolbox.md) and verify the status of the cluster. - -```console -ceph -s -``` - -The status should show one mon in quorum. If the status looks good, your cluster should be healthy again. - -### Restart the operator -Start the rook operator again to resume monitoring the health of the cluster. -```console -# create the operator. it is safe to ignore the errors that a number of resources already exist. -kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=1 -``` - -The operator will automatically add more mons to increase the quorum size again, depending on the `mon.count`. - -## Restoring CRDs After Deletion - -When the Rook CRDs are deleted, the Rook operator will respond to the deletion event to attempt to clean up the cluster resources. -If any data appears present in the cluster, Rook will refuse to allow the resources to be deleted since the operator will -refuse to remove the finalizer on the CRs until the underlying data is deleted. For more details, see the -[dependency design doc](https://github.com/rook/rook/blob/master/design/ceph/resource-dependencies.md). - -While it is good that the CRs will not be deleted and the underlying Ceph data and daemons continue to be -available, the CRs will be stuck indefinitely in a `Deleting` state in which the operator will not -continue to ensure cluster health. Upgrades will be blocked, further updates to the CRs are prevented, and so on. -Since Kubernetes does not allow undeleting resources, the following procedure will allow you to restore -the CRs to their prior state without even necessarily suffering cluster downtime. - -1. Scale down the operator - -```console -kubectl -n rook-ceph scale --replicas=0 deploy/rook-ceph-operator -``` - -2. Backup all Rook CRs and critical metadata - -```console -# Store the CephCluster CR settings. Also, save other Rook CRs that are in terminating state. -kubectl -n rook-ceph get cephcluster rook-ceph -o yaml > cluster.yaml - -# Backup critical secrets and configmaps in case something goes wrong later in the procedure -kubectl -n rook-ceph get secret -o yaml > secrets.yaml -kubectl -n rook-ceph get configmap -o yaml > configmaps.yaml -``` - -3. Remove the owner references from all critical Rook resources that were referencing the CephCluster CR. - The critical resources include: - - Secrets: `rook-ceph-admin-keyring`, `rook-ceph-config`, `rook-ceph-mon`, `rook-ceph-mons-keyring` - - ConfigMap: `rook-ceph-mon-endpoints` - - Services: `rook-ceph-mon-*`, `rook-ceph-mgr-*` - - Deployments: `rook-ceph-mon-*`, `rook-ceph-osd-*`, `rook-ceph-mgr-*` - - PVCs (if applicable): `rook-ceph-mon-*` and the OSD PVCs (named `-*`, for example `set1-data-*`) - -For example, remove this entire block from each resource: - -```yaml -ownerReferences: -- apiVersion: ceph.rook.io/v1 - blockOwnerDeletion: true - controller: true - kind: CephCluster - name: rook-ceph - uid: -``` - -4. **After confirming all critical resources have had the owner reference to the CephCluster CR removed**, now - we allow the cluster CR to be deleted. Remove the finalizer by editing the CephCluster CR. 
- -```console -kubectl -n rook-ceph edit cephcluster -``` - -For example, remove the following from the CR metadata: - -```yaml - finalizers: - - cephcluster.ceph.rook.io -``` - -After the finalizer is removed, the CR will be immediately deleted. If all owner references were properly removed, -all ceph daemons will continue running and there will be no downtime. - -5. Create the CephCluster CR with the same settings as previously - -```shell -# Use the same cluster settings as exported above in step 2. -kubectl create -f cluster.yaml -``` - -6. If there are other CRs in terminating state such as CephBlockPools, CephObjectStores, or CephFilesystems, - follow the above steps as well for those CRs: - - Backup the CR - - Remove the finalizer and confirm the CR is deleted (the underlying Ceph resources will be preserved) - - Create the CR again - -7. Scale up the operator - -```shell -kubectl -n rook-ceph --replicas=1 deploy/rook-ceph-operator -``` - -Watch the operator log to confirm that the reconcile completes successfully. - -## Adopt an existing Rook Ceph cluster into a new Kubernetes cluster - -Situations this section can help resolve: - -1. The Kubernetes environment underlying a running Rook Ceph cluster failed catastrophically, requiring a new Kubernetes environment in which the user wishes to recover the previous Rook Ceph cluster. -2. The user wishes to migrate their existing Rook Ceph cluster to a new Kubernetes environment, and downtime can be tolerated. - -### Prerequisites - -1. A working Kubernetes cluster to which we will migrate the previous Rook Ceph cluster. -2. At least one Ceph mon db is in quorum, and sufficient number of Ceph OSD is `up` and `in` before disaster. -3. The previous Rook Ceph cluster is not running. - -### Overview for Steps below - -1. Start a new and clean Rook Ceph cluster, with old `CephCluster` `CephBlockPool` `CephFilesystem` `CephNFS` `CephObjectStore`. -2. Shut the new cluster down when it has been created successfully. -3. Replace ceph-mon data with that of the old cluster. -4. Replace `fsid` in `secrets/rook-ceph-mon` with that of the old one. -5. Fix monmap in ceph-mon db. -6. Fix ceph mon auth key. -7. Disable auth. -8. Start the new cluster, watch it resurrect. -9. Fix admin auth key, and enable auth. -10. Restart cluster for the final time. - -### Steps - -Assuming `dataHostPathData` is `/var/lib/rook`, and the `CephCluster` trying to adopt is named `rook-ceph`. - -1. Make sure the old Kubernetes cluster is completely torn down and the new Kubernetes cluster is up and running without Rook Ceph. -1. Backup `/var/lib/rook` in all the Rook Ceph nodes to a different directory. Backups will be used later. -1. Pick a `/var/lib/rook/rook-ceph/rook-ceph.config` from any previous Rook Ceph node and save the old cluster `fsid` from its content. -1. Remove `/var/lib/rook` from all the Rook Ceph nodes. -1. Add identical `CephCluster` descriptor to the new Kubernetes cluster, especially identical `spec.storage.config` and `spec.storage.nodes`, except `mon.count`, which should be set to `1`. -1. Add identical `CephFilesystem` `CephBlockPool` `CephNFS` `CephObjectStore` descriptors (if any) to the new Kubernetes cluster. -1. Install Rook Ceph in the new Kubernetes cluster. -1. Watch the operator logs with `kubectl -n rook-ceph logs -f rook-ceph-operator-xxxxxxx`, and wait until the orchestration has settled. -1. 
**STATE**: Now the cluster will have `rook-ceph-mon-a`, `rook-ceph-mgr-a`, and all the auxiliary pods up and running, and zero (hopefully) `rook-ceph-osd-ID-xxxxxx` running. `ceph -s` output should report 1 mon, 1 mgr running, and all of the OSDs down, all PGs are in `unknown` state. Rook should not start any OSD daemon since all devices belongs to the old cluster (which have a different `fsid`). -1. Run `kubectl -n rook-ceph exec -it rook-ceph-mon-a-xxxxxxxx bash` to enter the `rook-ceph-mon-a` pod, - - ```shell - mon-a# cat /etc/ceph/keyring-store/keyring # save this keyring content for later use - mon-a# exit - ``` - -1. Stop the Rook operator by running `kubectl -n rook-ceph edit deploy/rook-ceph-operator` and set `replicas` to `0`. -1. Stop cluster daemons by running `kubectl -n rook-ceph delete deploy/X` where X is every deployment in namespace `rook-ceph`, except `rook-ceph-operator` and `rook-ceph-tools`. -1. Save the `rook-ceph-mon-a` address with `kubectl -n rook-ceph get cm/rook-ceph-mon-endpoints -o yaml` in the new Kubernetes cluster for later use. - -1. SSH to the host where `rook-ceph-mon-a` in the new Kubernetes cluster resides. - 1. Remove `/var/lib/rook/mon-a` - 2. Pick a healthy `rook-ceph-mon-ID` directory (`/var/lib/rook/mon-ID`) in the previous backup, copy to `/var/lib/rook/mon-a`. `ID` is any healthy mon node ID of the old cluster. - 3. Replace `/var/lib/rook/mon-a/keyring` with the saved keyring, preserving only the `[mon.]` section, remove `[client.admin]` section. - 4. Run `docker run -it --rm -v /var/lib/rook:/var/lib/rook ceph/ceph:v14.2.1-20190430 bash`. The Docker image tag should match the Ceph version used in the Rook cluster. The `/etc/ceph/ceph.conf` file needs to exist for `ceph-mon` to work. - - ```shell - touch /etc/ceph/ceph.conf - cd /var/lib/rook - ceph-mon --extract-monmap monmap --mon-data ./mon-a/data # Extract monmap from old ceph-mon db and save as monmap - monmaptool --print monmap # Print the monmap content, which reflects the old cluster ceph-mon configuration. - monmaptool --rm a monmap # Delete `a` from monmap. - monmaptool --rm b monmap # Repeat, and delete `b` from monmap. - monmaptool --rm c monmap # Repeat this pattern until all the old ceph-mons are removed - monmaptool --rm d monmap - monmaptool --rm e monmap - monmaptool --addv a [v2:10.77.2.216:3300,v1:10.77.2.216:6789] monmap # Replace it with the rook-ceph-mon-a address you got from previous command. - ceph-mon --inject-monmap monmap --mon-data ./mon-a/data # Replace monmap in ceph-mon db with our modified version. - rm monmap - exit - ``` - -1. Tell Rook to run as old cluster by running `kubectl -n rook-ceph edit secret/rook-ceph-mon` and changing `fsid` to the original `fsid`. Note that the `fsid` is base64 encoded and must not contain a trailing carriage return. For example: - - ```shell - $ echo -n a811f99a-d865-46b7-8f2c-f94c064e4356 | base64 # Replace with the fsid from your old cluster. - ``` - -1. Disable authentication by running `kubectl -n rook-ceph edit cm/rook-config-override` and adding content below: - - ```yaml - data: - config: | - [global] - auth cluster required = none - auth service required = none - auth client required = none - auth supported = none - ``` - -1. Bring the Rook Ceph operator back online by running `kubectl -n rook-ceph edit deploy/rook-ceph-operator` and set `replicas` to `1`. -1. Watch the operator logs with `kubectl -n rook-ceph logs -f rook-ceph-operator-xxxxxxx`, and wait until the orchestration has settled. -1. 
**STATE**: Now the new cluster should be up and running with authentication disabled. `ceph -s` should report 1 mon & 1 mgr & all of the OSDs up and running, and all PGs in either `active` or `degraded` state. -1. Run `kubectl -n rook-ceph exec -it rook-ceph-tools-XXXXXXX bash` to enter tools pod: - - ```console - vi key - # [paste keyring content saved before, preserving only `[client admin]` section] - ceph auth import -i key - rm key - ``` - -1. Re-enable authentication by running `kubectl -n rook-ceph edit cm/rook-config-override` and removing auth configuration added in previous steps. -1. Stop the Rook operator by running `kubectl -n rook-ceph edit deploy/rook-ceph-operator` and set `replicas` to `0`. -1. Shut down entire new cluster by running `kubectl -n rook-ceph delete deploy/X` where X is every deployment in namespace `rook-ceph`, except `rook-ceph-operator` and `rook-ceph-tools`, again. This time OSD daemons are present and should be removed too. -1. Bring the Rook Ceph operator back online by running `kubectl -n rook-ceph edit deploy/rook-ceph-operator` and set `replicas` to `1`. -1. Watch the operator logs with `kubectl -n rook-ceph logs -f rook-ceph-operator-xxxxxxx`, and wait until the orchestration has settled. -1. **STATE**: Now the new cluster should be up and running with authentication enabled. `ceph -s` output should not change much comparing to previous steps. - -## Backing up and restoring a cluster based on PVCs into a new Kubernetes cluster - -It is possible to migrate/restore an rook/ceph cluster from an existing Kubernetes cluster to a new one without resorting to SSH access or ceph tooling. This allows doing the migration using standard kubernetes resources only. This guide assumes the following -1. You have a CephCluster that uses PVCs to persist mon and osd data. Let's call it the "old cluster" -1. You can restore the PVCs as-is in the new cluster. Usually this is done by taking regular snapshots of the PVC volumes and using a tool that can re-create PVCs from these snapshots in the underlying cloud provider. Velero is one such tool. (https://github.com/vmware-tanzu/velero) -1. You have regular backups of the secrets and configmaps in the rook-ceph namespace. Velero provides this functionality too. - -Do the following in the new cluster: -1. Stop the rook operator by scaling the deployment `rook-ceph-operator` down to zero: `kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas 0` -and deleting the other deployments. An example command to do this is `k -n rook-ceph delete deployment -l operator!=rook` -1. Restore the rook PVCs to the new cluster. -1. Copy the keyring and fsid secrets from the old cluster: `rook-ceph-mgr-a-keyring`, `rook-ceph-mon`, `rook-ceph-mons-keyring`, `rook-ceph-osd-0-keyring`, ... -1. Delete mon services and copy them from the old cluster: `rook-ceph-mon-a`, `rook-ceph-mon-b`, ... Note that simply re-applying won't work because the goal here is to restore the `clusterIP` in each service and this field is immutable in `Service` resources. -1. Copy the endpoints configmap from the old cluster: `rook-ceph-mon-endpoints` -1. Scale the rook operator up again : `kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas 1` -1. Wait until the reconciliation is over. 
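As a sketch of the copy steps above, assuming the old and new clusters are reachable as kubectl contexts named `old` and `new` (the context names are assumptions for illustration), the secrets and the endpoints configmap can be transferred as shown below. Strip cluster-specific fields such as `resourceVersion` and `uid` from the exported yaml if `apply` rejects them.

```console
# copy the keyring and fsid secrets (repeat for every secret listed above)
kubectl --context old -n rook-ceph get secret rook-ceph-mon -o yaml | kubectl --context new apply -f -

# copy the mon endpoints configmap
kubectl --context old -n rook-ceph get cm rook-ceph-mon-endpoints -o yaml | kubectl --context new apply -f -
```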
diff --git a/Documentation/ceph-examples.md b/Documentation/ceph-examples.md deleted file mode 100644 index 1c4221227..000000000 --- a/Documentation/ceph-examples.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Examples -weight: 2050 -indent: true ---- -{% include_relative branch.liquid %} - -# Ceph Examples - -Configuration for Rook and Ceph can be configured in multiple ways to provide block devices, shared filesystem volumes or object storage in a kubernetes namespace. We have provided several examples to simplify storage setup, but remember there are many tunables and you will need to decide what settings work for your use case and environment. - -See the **[example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph)** folder for all the rook/ceph setup example spec files. - -## Common Resources - -The first step to deploy Rook is to create the CRDs and other common resources. The configuration for these resources will be the same for most deployments. -The [crds.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/crds.yaml) and -[common.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/common.yaml) sets these resources up. - -```console -kubectl create -f crds.yaml -f common.yaml -``` - -The examples all assume the operator and all Ceph daemons will be started in the same namespace. If you want to deploy the operator in a separate namespace, see the comments throughout `common.yaml`. - -## Operator - -After the common resources are created, the next step is to create the Operator deployment. Several spec file examples are provided in [this directory](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/): - -* `operator.yaml`: The most common settings for production deployments - * `kubectl create -f operator.yaml` -* `operator-openshift.yaml`: Includes all of the operator settings for running a basic Rook cluster in an OpenShift environment. You will also want to review the [OpenShift Prerequisites](ceph-openshift.md) to confirm the settings. - * `oc create -f operator-openshift.yaml` - -Settings for the operator are configured through environment variables on the operator deployment. The individual settings are documented in [operator.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator.yaml). - -## Cluster CRD - -Now that your operator is running, let's create your Ceph storage cluster. This CR contains the most critical settings -that will influence how the operator configures the storage. It is important to understand the various ways to configure -the cluster. These examples represent a very small set of the different ways to configure the storage. - -* `cluster.yaml`: This file contains common settings for a production storage cluster. Requires at least three worker nodes. -* `cluster-test.yaml`: Settings for a test cluster where redundancy is not configured. Requires only a single node. -* `cluster-on-pvc.yaml`: This file contains common settings for backing the Ceph Mons and OSDs by PVs. Useful when running in cloud environments or where local PVs have been created for Ceph to consume. -* `cluster-external.yaml`: Connect to an [external Ceph cluster](ceph-cluster-crd.md#external-cluster) with minimal access to monitor the health of the cluster and connect to the storage. 
-* `cluster-external-management.yaml`: Connect to an [external Ceph cluster](ceph-cluster-crd.md#external-cluster) with the admin key of the external cluster to enable - remote creation of pools and configure services such as an [Object Store](ceph-object.md) or a [Shared Filesystem](ceph-filesystem.md). -* `cluster-stretched.yaml`: Create a cluster in "stretched" mode, with five mons stretched across three zones, and the OSDs across two zones. See the [Stretch documentation](ceph-cluster-crd.md#stretch-cluster). - -See the [Cluster CRD](ceph-cluster-crd.md) topic for more details and more examples for the settings. - -## Setting up consumable storage - -Now we are ready to setup [block](https://ceph.com/ceph-storage/block-storage/), [shared filesystem](https://ceph.com/ceph-storage/file-system/) or [object storage](https://ceph.com/ceph-storage/object-storage/) in the Rook Ceph cluster. These kinds of storage are respectively referred to as CephBlockPool, CephFilesystem and CephObjectStore in the spec files. - -### Block Devices - -Ceph can provide raw block device volumes to pods. Each example below sets up a storage class which can then be used to provision a block device in kubernetes pods. The storage class is defined with [a pool](http://docs.ceph.com/docs/master/rados/operations/pools/) which defines the level of data redundancy in Ceph: - -* `storageclass.yaml`: This example illustrates replication of 3 for production scenarios and requires at least three worker nodes. Your data is replicated on three different kubernetes worker nodes and intermittent or long-lasting single node failures will not result in data unavailability or loss. -* `storageclass-ec.yaml`: Configures erasure coding for data durability rather than replication. [Ceph's erasure coding](http://docs.ceph.com/docs/master/rados/operations/erasure-code/) is more efficient than replication so you can get high reliability without the 3x replication cost of the preceding example (but at the cost of higher computational encoding and decoding costs on the worker nodes). Erasure coding requires at least three worker nodes. See the [Erasure coding](ceph-pool-crd.md#erasure-coded) documentation for more details. -* `storageclass-test.yaml`: Replication of 1 for test scenarios and it requires only a single node. Do not use this for applications that store valuable data or have high-availability storage requirements, since a single node failure can result in data loss. - -The storage classes are found in different sub-directories depending on the driver: - -* `csi/rbd`: The CSI driver for block devices. This is the preferred driver going forward. -* `flex`: The flex driver will be deprecated in a future release to be determined. - -See the [Ceph Pool CRD](ceph-pool-crd.md) topic for more details on the settings. - -### Shared Filesystem - -Ceph filesystem (CephFS) allows the user to 'mount' a shared posix-compliant folder into one or more hosts (pods in the container world). This storage is similar to NFS shared storage or CIFS shared folders, as explained [here](https://ceph.com/ceph-storage/file-system/). - -File storage contains multiple pools that can be configured for different scenarios: - -* `filesystem.yaml`: Replication of 3 for production scenarios. Requires at least three worker nodes. -* `filesystem-ec.yaml`: Erasure coding for production scenarios. Requires at least three worker nodes. -* `filesystem-test.yaml`: Replication of 1 for test scenarios. Requires only a single node. 
- -Dynamic provisioning is possible with the CSI driver. The storage class for shared filesystems is found in the `csi/cephfs` directory. - -See the [Shared Filesystem CRD](ceph-filesystem-crd.md) topic for more details on the settings. - -### Object Storage - -Ceph supports storing blobs of data called objects that support HTTP(s)-type get/put/post and delete semantics. This storage is similar to AWS S3 storage, for example. - -Object storage contains multiple pools that can be configured for different scenarios: - -* `object.yaml`: Replication of 3 for production scenarios. Requires at least three worker nodes. -* `object-openshift.yaml`: Replication of 3 with rgw in a port range valid for OpenShift. Requires at least three worker nodes. -* `object-ec.yaml`: Erasure coding rather than replication for production scenarios. Requires at least three worker nodes. -* `object-test.yaml`: Replication of 1 for test scenarios. Requires only a single node. - -See the [Object Store CRD](ceph-object-store-crd.md) topic for more details on the settings. - -### Object Storage User - -* `object-user.yaml`: Creates a simple object storage user and generates credentials for the S3 API - -### Object Storage Buckets - -The Ceph operator also runs an object store bucket provisioner which can grant access to existing buckets or dynamically provision new buckets. - -* [object-bucket-claim-retain.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/object-bucket-claim-retain.yaml) Creates a request for a new bucket by referencing a StorageClass which saves the bucket when the initiating OBC is deleted. -* [object-bucket-claim-delete.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/object-bucket-claim-delete.yaml) Creates a request for a new bucket by referencing a StorageClass which deletes the bucket when the initiating OBC is deleted. -* [storageclass-bucket-retain.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/storageclass-bucket-retain.yaml) Creates a new StorageClass which defines the Ceph Object Store, a region, and retains the bucket after the initiating OBC is deleted. -* [storageclass-bucket-delete.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/storageclass-bucket-delete.yaml) Creates a new StorageClass which defines the Ceph Object Store, a region, and deletes the bucket after the initiating OBC is deleted. diff --git a/Documentation/ceph-filesystem-crd.md b/Documentation/ceph-filesystem-crd.md deleted file mode 100644 index 4b2b533cc..000000000 --- a/Documentation/ceph-filesystem-crd.md +++ /dev/null @@ -1,225 +0,0 @@ ---- -title: Shared Filesystem CRD -weight: 3000 -indent: true ---- -{% include_relative branch.liquid %} - -# Ceph Shared Filesystem CRD - -Rook allows creation and customization of shared filesystems through the custom resource definitions (CRDs). The following settings are available for Ceph filesystems. - -## Samples - -### Replicated - -> **NOTE**: This sample requires *at least 1 OSD per node*, with each OSD located on *3 different nodes*. - -Each OSD must be located on a different node, because both of the defined pools set the [`failureDomain`](ceph-pool-crd.md#spec) to `host` and the `replicated.size` to `3`. - -The `failureDomain` can also be set to another location type (e.g. `rack`), if it has been added as a `location` in the [Storage Selection Settings](ceph-cluster-crd.md#storage-selection-settings). 
- -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: myfs - namespace: rook-ceph -spec: - metadataPool: - failureDomain: host - replicated: - size: 3 - dataPools: - - failureDomain: host - replicated: - size: 3 - preserveFilesystemOnDelete: true - metadataServer: - activeCount: 1 - activeStandby: true - # A key/value list of annotations - annotations: - # key: value - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - mds-node - # tolerations: - # - key: mds-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # topologySpreadConstraints: - resources: - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" -``` - -(These definitions can also be found in the [`filesystem.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/filesystem.yaml) file) - -### Erasure Coded - -Erasure coded pools require the OSDs to use `bluestore` for the configured [`storeType`](ceph-cluster-crd.md#osd-configuration-settings). Additionally, erasure coded pools can only be used with `dataPools`. The `metadataPool` must use a replicated pool. - -> **NOTE**: This sample requires *at least 3 bluestore OSDs*, with each OSD located on a *different node*. - -The OSDs must be located on different nodes, because the [`failureDomain`](ceph-pool-crd.md#spec) will be set to `host` by default, and the `erasureCoded` chunk settings require at least 3 different OSDs (2 `dataChunks` + 1 `codingChunks`). - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: myfs-ec - namespace: rook-ceph -spec: - metadataPool: - replicated: - size: 3 - dataPools: - - erasureCoded: - dataChunks: 2 - codingChunks: 1 - metadataServer: - activeCount: 1 - activeStandby: true -``` - -(These definitions can also be found in the [`filesystem-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/filesystem-ec.yaml) file. -Also see an example in the [`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml) for how to configure the volume.) - -### Mirroring - -Ceph filesystem mirroring is a process of asynchronous replication of snapshots to a remote CephFS file system. -Snapshots are synchronized by mirroring snapshot data followed by creating a snapshot with the same name (for a given directory on the remote file system) as the snapshot being synchronized. -It is generally useful when planning for Disaster Recovery. -Mirroring is for clusters that are geographically distributed and stretching a single cluster is not possible due to high latencies. 
- -The following will enable mirroring of the filesystem: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: myfs - namespace: rook-ceph -spec: - metadataPool: - failureDomain: host - replicated: - size: 3 - dataPools: - - failureDomain: host - replicated: - size: 3 - preserveFilesystemOnDelete: true - metadataServer: - activeCount: 1 - activeStandby: true - mirroring: - enabled: true - # list of Kubernetes Secrets containing the peer token - # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers - peers: - secretNames: - - secondary-cluster-peer - # specify the schedule(s) on which snapshots should be taken - # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules - snapshotSchedules: - - path: / - interval: 24h # daily snapshots - startTime: 11:55 - # manage retention policies - # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies - snapshotRetention: - - path: / - duration: "h 24" -``` - -Once mirroring is enabled, Rook will by default create its own [bootstrap peer token](https://docs.ceph.com/en/latest/dev/cephfs-mirroring/?#bootstrap-peers) so that it can be used by another cluster. -The bootstrap peer token can be found in a Kubernetes Secret. The name of the Secret is present in the Status field of the CephFilesystem CR: - -```yaml -status: - info: - fsMirrorBootstrapPeerSecretName: fs-peer-token-myfs -``` - -This secret can then be fetched like so: - -```console -kubectl get secret -n rook-ceph fs-peer-token-myfs -o jsonpath='{.data.token}'|base64 -d -``` ->``` ->eyJmc2lkIjoiOTFlYWUwZGQtMDZiMS00ZDJjLTkxZjMtMTMxMWM5ZGYzODJiIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFEN1psOWZ3V1VGRHhBQWdmY0gyZi8xeUhYeGZDUTU5L1N0NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjEwLjEwMS4xOC4yMjM6MzMwMCx2MToxMC4xMDEuMTguMjIzOjY3ODldIn0= ->``` - -The secret must be decoded. The result will be another base64 encoded blob that you will import in the destination cluster: - -```console -external-cluster-console # ceph fs snapshot mirror peer_bootstrap import -``` - -See the official cephfs mirror documentation on [how to add a bootstrap peer](https://docs.ceph.com/en/latest/dev/cephfs-mirroring/). - -## Filesystem Settings - -### Metadata - -* `name`: The name of the filesystem to create, which will be reflected in the pool and other resource names. -* `namespace`: The namespace of the Rook cluster where the filesystem is created. - -### Pools - -The pools allow all of the settings defined in the Pool CRD spec. For more details, see the [Pool CRD](ceph-pool-crd.md) settings. In the example above, there must be at least three hosts (size 3) and at least eight devices (6 data + 2 coding chunks) in the cluster. - -* `metadataPool`: The settings used to create the filesystem metadata pool. Must use replication. -* `dataPools`: The settings to create the filesystem data pools. If multiple pools are specified, Rook will add the pools to the filesystem. Assigning users or files to a pool is left as an exercise for the reader with the [CephFS documentation](http://docs.ceph.com/docs/master/cephfs/file-layouts/). The data pools can use replication or erasure coding. If erasure coding pools are specified, the cluster must be running with bluestore enabled on the OSDs. -* `preserveFilesystemOnDelete`: If it is set to 'true' the filesystem will remain when the - CephFilesystem resource is deleted. 
This is a security measure to avoid loss of data if the - CephFilesystem resource is deleted accidentally. The default value is 'false'. This option - replaces `preservePoolsOnDelete` which should no longer be set. -* (deprecated) `preservePoolsOnDelete`: This option is replaced by the above - `preserveFilesystemOnDelete`. For backwards compatibility and upgradeability, if this is set to - 'true', Rook will treat `preserveFilesystemOnDelete` as being set to 'true'. - -## Metadata Server Settings - -The metadata server settings correspond to the MDS daemon settings. - -* `activeCount`: The number of active MDS instances. As load increases, CephFS will automatically partition the filesystem across the MDS instances. Rook will create double the number of MDS instances as requested by the active count. The extra instances will be in standby mode for failover. -* `activeStandby`: If true, the extra MDS instances will be in active standby mode and will keep a warm cache of the filesystem metadata for faster failover. The instances will be assigned by CephFS in failover pairs. If false, the extra MDS instances will all be on passive standby mode and will not maintain a warm cache of the metadata. -* `mirroring`: Sets up mirroring of the filesystem - * `enabled`: whether mirroring is enabled on that filesystem (default: false) - * `peers`: to configure mirroring peers - * `secretNames`: a list of peers to connect to. Currently (Ceph Pacific release) **only a single** peer is supported where a peer represents a Ceph cluster. - * `snapshotSchedules`: schedule(s) snapshot.One or more schedules are supported. - * `path`: filesystem source path to take the snapshot on - * `interval`: frequency of the snapshots. The interval can be specified in days, hours, or minutes using d, h, m suffix respectively. - * `startTime`: optional, determines at what time the snapshot process starts, specified using the ISO 8601 time format. - * `snapshotRetention`: allow to manage retention policies: - * `path`: filesystem source path to apply the retention on - * `duration`: -* `annotations`: Key value pair list of annotations to add. -* `labels`: Key value pair list of labels to add. -* `placement`: The mds pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). -* `resources`: Set resource requests/limits for the Filesystem MDS Pod(s), see [MDS Resources Configuration Settings](#mds-resources-configuration-settings) -* `priorityClassName`: Set priority class name for the Filesystem MDS Pod(s) - -### MDS Resources Configuration Settings - -The format of the resource requests/limits structure is the same as described in the [Ceph Cluster CRD documentation](ceph-cluster-crd.md#resource-requirementslimits). - -If the memory resource limit is declared Rook will automatically set the MDS configuration `mds_cache_memory_limit`. The configuration value is calculated with the aim that the actual MDS memory consumption remains consistent with the MDS pods' resource declaration. - -In order to provide the best possible experience running Ceph in containers, Rook internally recommends the memory for MDS daemons to be at least 4096MB. -If a user configures a limit or request value that is too low, Rook will still run the pod(s) and print a warning to the operator log. 
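A minimal sketch of how these resource settings fit into the `metadataServer` spec, using the 4096MB guidance above (the exact values are only an example and should be sized for your workload):

```yaml
  metadataServer:
    activeCount: 1
    activeStandby: true
    resources:
      requests:
        cpu: "1"
        # keep the memory request/limit at or above the recommended 4096MB
        memory: "4Gi"
      limits:
        memory: "4Gi"
```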
diff --git a/Documentation/ceph-filesystem.md b/Documentation/ceph-filesystem.md deleted file mode 100644 index 0988ae4dd..000000000 --- a/Documentation/ceph-filesystem.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: Shared Filesystem -weight: 2300 -indent: true ---- -{% include_relative branch.liquid %} - -# Shared Filesystem - -A shared filesystem can be mounted with read/write permission from multiple pods. This may be useful for applications which can be clustered using a shared filesystem. - -This example runs a shared filesystem for the [kube-registry](https://github.com/kubernetes/kubernetes/tree/release-1.9/cluster/addons/registry). - -## Prerequisites - -This guide assumes you have created a Rook cluster as explained in the main [Kubernetes guide](ceph-quickstart.md) - -### Multiple Filesystems Support - -Multiple filesystems are supported as of the Ceph Pacific release. - -## Create the Filesystem - -Create the filesystem by specifying the desired settings for the metadata pool, data pools, and metadata server in the `CephFilesystem` CRD. In this example we create the metadata pool with replication of three and a single data pool with replication of three. For more options, see the documentation on [creating shared filesystems](ceph-filesystem-crd.md). - -Save this shared filesystem definition as `filesystem.yaml`: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: myfs - namespace: rook-ceph -spec: - metadataPool: - replicated: - size: 3 - dataPools: - - replicated: - size: 3 - preserveFilesystemOnDelete: true - metadataServer: - activeCount: 1 - activeStandby: true -``` - -The Rook operator will create all the pools and other resources necessary to start the service. This may take a minute to complete. - -```console -# Create the filesystem -kubectl create -f filesystem.yaml -[...] -``` - -```console -# To confirm the filesystem is configured, wait for the mds pods to start -kubectl -n rook-ceph get pod -l app=rook-ceph-mds -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-ceph-mds-myfs-7d59fdfcf4-h8kw9 1/1 Running 0 12s ->rook-ceph-mds-myfs-7d59fdfcf4-kgkjp 1/1 Running 0 12s ->``` - -To see detailed status of the filesystem, start and connect to the [Rook toolbox](ceph-toolbox.md). A new line will be shown with `ceph status` for the `mds` service. In this example, there is one active instance of MDS which is up, with one MDS instance in `standby-replay` mode in case of failover. - -```console -ceph status -``` ->``` -> ... -> services: -> mds: myfs-1/1/1 up {[myfs:0]=mzw58b=up:active}, 1 up:standby-replay ->``` - -## Provision Storage - -Before Rook can start provisioning storage, a StorageClass needs to be created based on the filesystem. This is needed for Kubernetes to interoperate -with the CSI driver to create persistent volumes. - -> **NOTE**: This example uses the CSI driver, which is the preferred driver going forward for K8s 1.13 and newer. Examples are found in the [CSI CephFS](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs) directory. For an example of a volume using the flex driver (required for K8s 1.12 and earlier), see the [Flex Driver](#flex-driver) section below. 
- -Save this storage class definition as `storageclass.yaml`: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-cephfs -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.cephfs.csi.ceph.com -parameters: - # clusterID is the namespace where operator is deployed. - clusterID: rook-ceph - - # CephFS filesystem name into which the volume shall be created - fsName: myfs - - # Ceph pool into which the volume shall be created - # Required for provisionVolume: "true" - pool: myfs-data0 - - # The secrets contain Ceph admin credentials. These are generated automatically by the operator - # in the same namespace as the cluster. - csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph - csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph - -reclaimPolicy: Delete -``` - -If you've deployed the Rook operator in a namespace other than "rook-ceph" -as is common change the prefix in the provisioner to match the namespace -you used. For example, if the Rook operator is running in "rook-op" the -provisioner value should be "rook-op.rbd.csi.ceph.com". - -Create the storage class. - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml -``` - -## Quotas - -> **IMPORTANT**: The CephFS CSI driver uses quotas to enforce the PVC size requested. -Only newer kernels support CephFS quotas (kernel version of at least 4.17). -If you require quotas to be enforced and the kernel driver does not support it, you can disable the kernel driver -and use the FUSE client. This can be done by setting `CSI_FORCE_CEPHFS_KERNEL_CLIENT: false` -in the operator deployment (`operator.yaml`). However, it is important to know that when -the FUSE client is enabled, there is an issue that during upgrade the application pods will be -disconnected from the mount and will need to be restarted. See the [upgrade guide](ceph-upgrade.md) -for more details. - -## Consume the Shared Filesystem: K8s Registry Sample - -As an example, we will start the kube-registry pod with the shared filesystem as the backing store. 
-Save the following spec as `kube-registry.yaml`: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: cephfs-pvc - namespace: kube-system -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - storageClassName: rook-cephfs ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kube-registry - namespace: kube-system - labels: - k8s-app: kube-registry - kubernetes.io/cluster-service: "true" -spec: - replicas: 3 - selector: - matchLabels: - k8s-app: kube-registry - template: - metadata: - labels: - k8s-app: kube-registry - kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - imagePullPolicy: Always - resources: - limits: - cpu: 100m - memory: 100Mi - env: - # Configuration reference: https://docs.docker.com/registry/configuration/ - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_HTTP_SECRET - value: "Ple4seCh4ngeThisN0tAVerySecretV4lue" - - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY - value: /var/lib/registry - volumeMounts: - - name: image-store - mountPath: /var/lib/registry - ports: - - containerPort: 5000 - name: registry - protocol: TCP - livenessProbe: - httpGet: - path: / - port: registry - readinessProbe: - httpGet: - path: / - port: registry - volumes: - - name: image-store - persistentVolumeClaim: - claimName: cephfs-pvc - readOnly: false -``` - -Create the Kube registry deployment: - -```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/kube-registry.yaml -``` - -You now have a docker registry which is HA with persistent storage. - -### Kernel Version Requirement - -If the Rook cluster has more than one filesystem and the application pod is scheduled to a node with kernel version older than 4.7, inconsistent results may arise since kernels older than 4.7 do not support specifying filesystem namespaces. - -## Consume the Shared Filesystem: Toolbox - -Once you have pushed an image to the registry (see the [instructions](https://github.com/kubernetes/kubernetes/tree/release-1.9/cluster/addons/registry) to expose and use the kube-registry), verify that kube-registry is using the filesystem that was configured above by mounting the shared filesystem in the toolbox pod. See the [Direct Filesystem](direct-tools.md#shared-filesystem-tools) topic for more details. - -## Teardown - -To clean up all the artifacts created by the filesystem demo: - -```console -kubectl delete -f kube-registry.yaml -``` - -To delete the filesystem components and backing data, delete the Filesystem CRD. - -> **WARNING: Data will be deleted if preserveFilesystemOnDelete=false**. - -```console -kubectl -n rook-ceph delete cephfilesystem myfs -``` - -Note: If the "preserveFilesystemOnDelete" filesystem attribute is set to true, the above command won't delete the filesystem. Recreating the same CRD will reuse the existing filesystem. - -## Flex Driver - -To create a volume based on the flex driver instead of the CSI driver, see the [kube-registry.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/flex/kube-registry.yaml) example manifest or refer to the complete flow in the Rook v1.0 [Shared Filesystem](https://rook.io/docs/rook/v1.0/ceph-filesystem.html) documentation. - -### Advanced Example: Erasure Coded Filesystem - -The Ceph filesystem example can be found here: [Ceph Shared Filesystem - Samples - Erasure Coded](ceph-filesystem-crd.md#erasure-coded). 
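For readers following the erasure coded example, the storage class differs from the one above mainly in the filesystem and data pool names. The sketch below assumes the filesystem from the CRD samples is named `myfs-ec` and that Rook created its first data pool as `myfs-ec-data0`; both names are assumptions here, so treat `storageclass-ec.yaml` in the examples directory as the authoritative version.

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs-ec
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  clusterID: rook-ceph
  # name of the erasure coded CephFilesystem
  fsName: myfs-ec
  # direct volume data to the erasure coded data pool
  pool: myfs-ec-data0
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
```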
diff --git a/Documentation/ceph-fs-mirror-crd.md b/Documentation/ceph-fs-mirror-crd.md deleted file mode 100644 index 192ea6500..000000000 --- a/Documentation/ceph-fs-mirror-crd.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: FilesystemMirror CRD -weight: 3600 -indent: true ---- -{% include_relative branch.liquid %} - -This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](ceph-quickstart.md) - -# Ceph FilesystemMirror CRD - -Rook allows creation and updating the fs-mirror daemon through the custom resource definitions (CRDs). -CephFS will support asynchronous replication of snapshots to a remote (different Ceph cluster) CephFS file system via cephfs-mirror tool. -Snapshots are synchronized by mirroring snapshot data followed by creating a snapshot with the same name (for a given directory on the remote file system) as the snapshot being synchronized. -For more information about user management and capabilities see the [Ceph docs](https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#cephfs-mirroring). - -## Creating daemon - -To get you started, here is a simple example of a CRD to deploy an cephfs-mirror daemon. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephFilesystemMirror -metadata: - name: my-fs-mirror - namespace: rook-ceph -``` - - -## Configuring mirroring peers - -On an external site you want to mirror with, you need to create a bootstrap peer token. -The token will be used by one site to **pull** images from the other site. -The following assumes the name of the pool is "test" and the site name "europe" (just like the region), so we will be pulling images from this site: - -```console -external-cluster-console # ceph fs snapshot mirror peer_bootstrap create myfs2 client.mirror europe -{"token": "eyJmc2lkIjogIjgyYjdlZDkyLTczYjAtNGIyMi1hOGI3LWVkOTQ4M2UyODc1NiIsICJmaWxlc3lzdGVtIjogIm15ZnMyIiwgInVzZXIiOiAiY2xpZW50Lm1pcnJvciIsICJzaXRlX25hbWUiOiAidGVzdCIsICJrZXkiOiAiQVFEVVAxSmdqM3RYQVJBQWs1cEU4cDI1ZUhld2lQK0ZXRm9uOVE9PSIsICJtb25faG9zdCI6ICJbdjI6MTAuOTYuMTQyLjIxMzozMzAwLHYxOjEwLjk2LjE0Mi4yMTM6Njc4OV0sW3YyOjEwLjk2LjIxNy4yMDc6MzMwMCx2MToxMC45Ni4yMTcuMjA3OjY3ODldLFt2MjoxMC45OS4xMC4xNTc6MzMwMCx2MToxMC45OS4xMC4xNTc6Njc4OV0ifQ=="} -``` - -For more details, refer to the official ceph-fs mirror documentation on [how to create a bootstrap peer](https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers). - -When the peer token is available, you need to create a Kubernetes Secret, it can named anything. -Our `europe-cluster-peer-fs-test-1` will have to be created manually, like so: - -```console -$ kubectl -n rook-ceph create secret generic "europe-cluster-peer-fs-test-1" \ ---from-literal=token=eyJmc2lkIjogIjgyYjdlZDkyLTczYjAtNGIyMi1hOGI3LWVkOTQ4M2UyODc1NiIsICJmaWxlc3lzdGVtIjogIm15ZnMyIiwgInVzZXIiOiAiY2xpZW50Lm1pcnJvciIsICJzaXRlX25hbWUiOiAidGVzdCIsICJrZXkiOiAiQVFEVVAxSmdqM3RYQVJBQWs1cEU4cDI1ZUhld2lQK0ZXRm9uOVE9PSIsICJtb25faG9zdCI6ICJbdjI6MTAuOTYuMTQyLjIxMzozMzAwLHYxOjEwLjk2LjE0Mi4yMTM6Njc4OV0sW3YyOjEwLjk2LjIxNy4yMDc6MzMwMCx2MToxMC45Ni4yMTcuMjA3OjY3ODldLFt2MjoxMC45OS4xMC4xNTc6MzMwMCx2MToxMC45OS4xMC4xNTc6Njc4OV0ifQ== -``` - -Rook will read a `token` key of the Data content of the Secret. - -You can now create the mirroring CR: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephFilesystemMirror -metadata: - name: my-fs-mirror - namespace: rook-ceph -spec: - peers: - secretNames: - - "europe-cluster-peer-pool-test-1" -``` - -You can add more filesystems by repeating the above and changing the "token" value of the Kubernetes Secret. 
-So the list might eventually look like: - -```yaml - peers: - secretNames: - - "europe-cluster-peer-fs-test-1" - - "europe-cluster-peer-fs-test-2" - - "europe-cluster-peer-fs-test-3" -``` - -Along with three Kubernetes Secret. - - -## Settings - -If any setting is unspecified, a suitable default will be used automatically. - -### FilesystemMirror metadata - -* `name`: The name that will be used for the Ceph cephfs-mirror daemon. -* `namespace`: The Kubernetes namespace that will be created for the Rook cluster. The services, pods, and other resources created by the operator will be added to this namespace. - -### FilesystemMirror Settings - -* `peers`: to configure mirroring peers - * `secretNames`: a list of peers to connect to. Currently (Ceph Pacific release) **only a single** peer is supported where a peer represents a Ceph cluster. - However, if you want to enable mirroring of multiple filesystems, you would have to have **one Secret per filesystem**. -* `placement`: The cephfs-mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). -* `annotations`: Key value pair list of annotations to add. -* `labels`: Key value pair list of labels to add. -* `resources`: The resource requirements for the cephfs-mirror pods. -* `priorityClassName`: The priority class to set on the cephfs-mirror pods. - diff --git a/Documentation/ceph-mon-health.md b/Documentation/ceph-mon-health.md deleted file mode 100644 index f67cb5e65..000000000 --- a/Documentation/ceph-mon-health.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Monitor Health -weight: 11130 -indent: true ---- - -# Monitor Health - -Failure in a distributed system is to be expected. Ceph was designed from the ground up to deal with the failures of a distributed system. -At the next layer, Rook was designed from the ground up to automate recovery of Ceph components that traditionally required admin intervention. -Monitor health is the most critical piece of the equation that Rook actively monitors. If they are not in a good state, -the operator will take action to restore their health and keep your cluster protected from disaster. - -The Ceph monitors (mons) are the brains of the distributed cluster. They control all of the metadata that is necessary -to store and retrieve your data as well as keep it safe. If the monitors are not in a healthy state you will risk losing all the data in your system. - -## Monitor Identity - -Each monitor in a Ceph cluster has a static identity. Every component in the cluster is aware of the identity, and that identity -must be immutable. The identity of a mon is its IP address. - -To have an immutable IP address in Kubernetes, Rook creates a K8s service for each monitor. The clusterIP of the service will act as the stable identity. - -When a monitor pod starts, it will bind to its podIP and it will expect communication to be via its service IP address. - -## Monitor Quorum - -Multiple mons work together to provide redundancy by each keeping a copy of the metadata. A variation of the distributed algorithm Paxos -is used to establish consensus about the state of the cluster. Paxos requires a super-majority of mons to be running in order to establish -quorum and perform operations in the cluster. 
If the majority of mons are not running, quorum is lost and nothing can be done in the cluster.
-
-### How many mons?
-
-Most commonly a cluster will have three mons. This means that one mon can go down and the cluster remains healthy:
-you still have 2/3 mons running to give you consensus in the cluster for any operation.
-
-You will always want an odd number of mons. Fifty percent of mons is not sufficient to maintain quorum. If you had two mons and one
-of them went down, you would have 1/2 of quorum. Since that is not a super-majority, the cluster would have to wait until the second mon is up again.
-Therefore, Rook prohibits an even number of mons.
-
-The number of mons to create in a cluster depends on your tolerance for losing a node. If you have 1 mon, zero nodes can be lost
-while maintaining quorum. With 3 mons, one node can be lost, and with 5 mons, two nodes can be lost. Because the Rook operator will automatically
-start a new monitor if one dies, you typically only need three mons. The more mons you have, the more overhead there will be to make
-a change to the cluster, which could become a performance issue in a large cluster.
-
-## Mitigating Monitor Failure
-
-Whatever the reason a mon may fail (power failure, software crash, software hang, etc.), there are several layers of mitigation in place
-to help recover the mon. It is always better to bring an existing mon back up than to fail over and bring up a new mon.
-
-The Rook operator creates each mon with a Deployment to ensure that the mon pod will always be restarted if it fails. If a mon pod stops
-for any reason, Kubernetes will automatically start the pod up again.
-
-In order for a mon to support a pod/node restart, the mon metadata is persisted to disk, either under the `dataDirHostPath` specified
-in the CephCluster CR, or in the volume defined by the `volumeClaimTemplate` in the CephCluster CR.
-This allows the mon to start back up with its existing metadata and continue where it left off even if the pod had
-to be re-created. Without this persistence, the mon cannot restart.
-
-## Failing over a Monitor
-
-If a mon is unhealthy and the K8s pod restart or liveness probe is not sufficient to bring a mon back up, the operator will make the decision
-to terminate the unhealthy monitor deployment and bring up a new monitor with a new identity.
-This is an operation that must be done while mon quorum is maintained by the other mons in the cluster.
-
-The operator checks mon health every 45 seconds. If a monitor is down, the operator will wait 10 minutes before failing over the unhealthy mon.
-These two intervals can be configured as parameters to the CephCluster CR (see below). If the intervals are too short, mons may be failed over too aggressively. If the intervals are too long, the cluster could be at risk of losing quorum if a new monitor is not brought up before another mon fails.
-
-```yaml
-healthCheck:
-  daemonHealth:
-    mon:
-      disabled: false
-      interval: 45s
-      timeout: 10m
-```
-
-If you want to force a mon to fail over for testing or other purposes, you can scale down the mon deployment to 0, then wait
-for the timeout. Note that the operator may scale up the mon again automatically if the operator is restarted or if a full
-reconcile is triggered, such as when the CephCluster CR is updated.
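-
-As a sketch of that manual failover, assuming the deployment names follow the `rook-ceph-mon-<id>` pattern that matches the pod names in the example below, and that mon-b is the mon being failed over:
-
-```console
-kubectl -n rook-ceph scale deployment rook-ceph-mon-b --replicas=0
-```
-
-After the failover timeout elapses, the operator should replace mon-b with a new mon that has a new identity.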
- -If the mon pod is in pending state and couldn't be assigned to a node (say, due to node drain), then the operator will wait for the timeout again before the mon failover. So the timeout waiting for the mon failover will be doubled in this case. - -To disable monitor automatic failover, the `timeout` can be set to `0`, if the monitor goes out of quorum Rook will never fail it over onto another node. -This is especially useful for planned maintenance. - -### Example Failover - -Rook will create mons with pod names such as mon-a, mon-b, and mon-c. Let's say mon-b had an issue and the pod failed. -```console -kubectl -n rook-ceph get pod -l app=rook-ceph-mon -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-ceph-mon-a-74dc96545-ch5ns 1/1 Running 0 9m ->rook-ceph-mon-b-6b9d895c4c-bcl2h 1/1 Error 2 9m ->rook-ceph-mon-c-7d6df6d65c-5cjwl 1/1 Running 0 8m ->``` - -After a failover, you will see the unhealthy mon removed and a new mon added such as mon-d. A fully healthy mon quorum is now running again. -```console -kubectl -n rook-ceph get pod -l app=rook-ceph-mon -``` ->``` ->NAME READY STATUS RESTARTS AGE ->rook-ceph-mon-a-74dc96545-ch5ns 1/1 Running 0 19m ->rook-ceph-mon-c-7d6df6d65c-5cjwl 1/1 Running 0 18m ->rook-ceph-mon-d-9e7ea7e76d-4bhxm 1/1 Running 0 20s ->``` - -From the toolbox we can verify the status of the health mon quorum: -```console -ceph -s -``` - ->``` -> cluster: -> id: 35179270-8a39-4e08-a352-a10c52bb04ff -> health: HEALTH_OK -> -> services: -> mon: 3 daemons, quorum a,b,d (age 2m) -> mgr: a(active, since 12m) -> osd: 3 osds: 3 up (since 10m), 3 in (since 10m) -> ... ->``` diff --git a/Documentation/ceph-monitoring.md b/Documentation/ceph-monitoring.md deleted file mode 100644 index 57e858232..000000000 --- a/Documentation/ceph-monitoring.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: Prometheus Monitoring -weight: 2500 -indent: true ---- -{% include_relative branch.liquid %} - -# Prometheus Monitoring - -Each Rook Ceph cluster has some built in metrics collectors/exporters for monitoring with [Prometheus](https://prometheus.io/). - -If you do not have Prometheus running, follow the steps below to enable monitoring of Rook. If your cluster already -contains a Prometheus instance, it will automatically discover Rooks scrape endpoint using the standard -`prometheus.io/scrape` and `prometheus.io/port` annotations. - -> **NOTE**: This assumes that the Prometheus instances is searching all your Kubernetes namespaces for Pods with these annotations. - -## Prometheus Operator - -First the Prometheus operator needs to be started in the cluster so it can watch for our requests to start monitoring Rook and respond by deploying the correct Prometheus pods and configuration. -A full explanation can be found in the [Prometheus operator repository on GitHub](https://github.com/prometheus-operator/prometheus-operator), but the quick instructions can be found here: - -```console -kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.40.0/bundle.yaml -``` - -This will start the Prometheus operator, but before moving on, wait until the operator is in the `Running` state: - -```console -kubectl get pod -``` - -Once the Prometheus operator is in the `Running` state, proceed to the next section to create a Prometheus instance. - -## Prometheus Instances - -With the Prometheus operator running, we can create a service monitor that will watch the Rook cluster and collect metrics regularly. 
-From the root of your locally cloned Rook repo, go the monitoring directory: - -```console -$ git clone --single-branch --branch v1.7.2 https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph/monitoring -``` - -Create the service monitor as well as the Prometheus server pod and service: - -```console -kubectl create -f service-monitor.yaml -kubectl create -f prometheus.yaml -kubectl create -f prometheus-service.yaml -``` - -Ensure that the Prometheus server pod gets created and advances to the `Running` state before moving on: - -```console -kubectl -n rook-ceph get pod prometheus-rook-prometheus-0 -``` - -> **NOTE**: It is not recommended to consume storage from the Ceph cluster for Prometheus. -> If the Ceph cluster fails, Prometheus would become unresponsive and thus not alert you of the failure. - -## Prometheus Web Console - -Once the Prometheus server is running, you can open a web browser and go to the URL that is output from this command: - -```console -echo "http://$(kubectl -n rook-ceph -o jsonpath={.status.hostIP} get pod prometheus-rook-prometheus-0):30900" -``` - -You should now see the Prometheus monitoring website. - -![Prometheus Monitoring Website](media/prometheus-monitor.png) - -Click on `Graph` in the top navigation bar. - -![Prometheus Add graph](media/prometheus-graph.png) - -In the dropdown that says `insert metric at cursor`, select any metric you would like to see, for example `ceph_cluster_total_used_bytes` - -![Prometheus Select Metric](media/prometheus-metric-cursor.png) - -Click on the `Execute` button. - -![Prometheus Execute Metric](media/prometheus-execute-metric-cursor.png) - -Below the `Execute` button, ensure the `Graph` tab is selected and you should now see a graph of your chosen metric over time. - -![Prometheus Execute Metric](media/prometheus-metric-cursor-graph.png) - -## Prometheus Consoles - -You can find Prometheus Consoles for and from Ceph here: [GitHub ceph/cephmetrics - dashboards/current directory](https://github.com/ceph/cephmetrics/tree/master/dashboards/current). - -A guide to how you can write your own Prometheus consoles can be found on the official Prometheus site here: [Prometheus.io Documentation - Console Templates](https://prometheus.io/docs/visualization/consoles/). - -## Prometheus Alerts - -To enable the Ceph Prometheus alerts follow these steps: - -1. Create the RBAC rules to enable monitoring. - -```console -kubectl create -f cluster/examples/kubernetes/ceph/monitoring/rbac.yaml -``` - -2. Make following changes to your CephCluster object (e.g., `cluster.yaml`). - -```YAML -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -[...] -spec: -[...] - monitoring: - enabled: true - rulesNamespace: "rook-ceph" -[...] -``` - -(Where `rook-ceph` is the CephCluster name / namespace) - -3. Deploy or update the CephCluster object. - -```console -kubectl apply -f cluster.yaml -``` - -> **NOTE**: This expects the Prometheus Operator and a Prometheus instance to be pre-installed by the admin. - -## Grafana Dashboards - -The dashboards have been created by [@galexrt](https://github.com/galexrt). For feedback on the dashboards please reach out to him on the [Rook.io Slack](https://slack.rook.io). - -> **NOTE**: The dashboards are only compatible with Grafana 7.2.0 or higher. -> -> Also note that the dashboards are updated from time to time, to fix issues and improve them. 
- -The following Grafana dashboards are available: - -* [Ceph - Cluster](https://grafana.com/dashboards/2842) -* [Ceph - OSD (Single)](https://grafana.com/dashboards/5336) -* [Ceph - Pools](https://grafana.com/dashboards/5342) - -## Updates and Upgrades - -When updating Rook, there may be updates to RBAC for monitoring. It is easy to apply the changes -with each update or upgrade. This should be done at the same time you update Rook common resources -like `common.yaml`. - -```console -kubectl apply -f cluster/examples/kubernetes/ceph/monitoring/rbac.yaml -``` - -> This is updated automatically if you are upgrading via the helm chart - -## Teardown - -To clean up all the artifacts created by the monitoring walk-through, copy/paste the entire block below (note that errors about resources "not found" can be ignored): - -```console -kubectl delete -f service-monitor.yaml -kubectl delete -f prometheus.yaml -kubectl delete -f prometheus-service.yaml -kubectl delete -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.40.0/bundle.yaml -``` - -Then the rest of the instructions in the [Prometheus Operator docs](https://github.com/prometheus-operator/prometheus-operator#removal) can be followed to finish cleaning up. - -## Special Cases - -### Tectonic Bare Metal - -Tectonic strongly discourages the `tectonic-system` Prometheus instance to be used outside their intentions, so you need to create a new [Prometheus Operator](https://coreos.com/operators/prometheus/docs/latest/) yourself. -After this you only need to create the service monitor as stated above. - -### CSI Liveness - -To integrate CSI liveness and grpc into ceph monitoring we will need to deploy -a service and service monitor. - -```console -kubectl create -f csi-metrics-service-monitor.yaml -``` - -This will create the service monitor to have promethues monitor CSI - -### Collecting RBD per-image IO statistics - -RBD per-image IO statistics collection is disabled by default. This can be enabled by setting `enableRBDStats: true` in the CephBlockPool spec. -Prometheus does not need to be restarted after enabling it. - -### Using custom label selectors in Prometheus - -If Prometheus needs to select specific resources, we can do so by injecting labels into these objects and using it as label selector. - -```YAML -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -[...] -spec: -[...] -labels: - monitoring: - prometheus: k8s -[...] -``` diff --git a/Documentation/ceph-nfs-crd.md b/Documentation/ceph-nfs-crd.md deleted file mode 100644 index 9b10aa47f..000000000 --- a/Documentation/ceph-nfs-crd.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: NFS CRD -weight: 3100 -indent: true ---- - -# Ceph NFS Gateway CRD - -## Overview - -Rook allows exporting NFS shares of the filesystem or object store through the CephNFS custom resource definition. This will spin up a cluster of [NFS Ganesha](https://github.com/nfs-ganesha/nfs-ganesha) servers that coordinate with one another via shared RADOS objects. The servers will be configured for NFSv4.1+ access, as serving earlier protocols can inhibit responsiveness after a server restart. - -## Samples - -The following sample will create a two-node active-active cluster of NFS Ganesha gateways. The recovery objects are stored in a RADOS pool named `myfs-data0` with a RADOS namespace of `nfs-ns`. 
- -This example requires the filesystem to first be configured by the [Filesystem](ceph-filesystem-crd.md) because here recovery objects are stored in filesystem data pool. - -> **NOTE**: For an RGW object store, a data pool of `my-store.rgw.buckets.data` can be used after configuring the [Object Store](ceph-object-store-crd.md). - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephNFS -metadata: - name: my-nfs - namespace: rook-ceph -spec: - rados: - # RADOS pool where NFS client recovery data and per-daemon configs are - # stored. In this example the data pool for the "myfs" filesystem is used. - # If using the object store example, the data pool would be - # "my-store.rgw.buckets.data". Note that this has nothing to do with where - # exported CephFS' or objectstores live. - pool: myfs-data0 - # RADOS namespace where NFS client recovery data is stored in the pool. - namespace: nfs-ns - # Settings for the NFS server - server: - # the number of active NFS servers - active: 2 - # A key/value list of annotations - annotations: - # key: value - # where to run the NFS server - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - mds-node - # tolerations: - # - key: mds-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # topologySpreadConstraints: - - # The requests and limits set here allow the ganesha pod(s) to use half of one CPU core and 1 gigabyte of memory - resources: - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # the priority class to set to influence the scheduler's pod preemption - priorityClassName: -``` - - Enable the creation of NFS exports in the dashboard for a given cephfs or object gateway pool by running the following command in the toolbox container: - -[For single NFS-GANESHA cluster](https://docs.ceph.com/en/latest/mgr/dashboard/#configuring-nfs-ganesha-in-the-dashboard) - -```console -ceph dashboard set-ganesha-clusters-rados-pool-namespace [/] -``` - -[For multiple NFS-GANESHA cluster](https://docs.ceph.com/en/latest/mgr/dashboard/#support-for-multiple-nfs-ganesha-clusters) - -```console -ceph dashboard set-ganesha-clusters-rados-pool-namespace :[/](,:[/])* -``` - -## NFS Settings - -### RADOS Settings - -* `pool`: The pool where ganesha recovery backend and supplemental configuration objects will be stored -* `namespace`: The namespace in `pool` where ganesha recovery backend and supplemental configuration objects will be stored - -> **NOTE**: Don't use EC pools for NFS because ganesha uses omap in the recovery objects and grace db. EC pools do not support omap. - -## EXPORT Block Configuration - -All daemons within a cluster will share configuration with no exports defined, and that includes a RADOS object via: - -```ini -%url rados:////conf-nfs. -``` - -> **NOTE**: This format of nfs-ganesha config object name was introduced in Ceph Octopus Version. In older versions, each daemon has it's own config object and with the name as *conf-.*. The nodeid is a value automatically assigned internally by rook. Nodeids start with "a" and go through "z", at which point they become two letters ("aa" to "az"). - -The pool and namespace are configured via the spec's RADOS block. - -When a server is started, it will create the included object if it does not already exist. It is possible to prepopulate the included objects prior to starting the server. 
The format for these objects is documented in the [NFS Ganesha](https://github.com/nfs-ganesha/nfs-ganesha/wiki) project. - -## Scaling the active server count - -It is possible to scale the size of the cluster up or down by modifying -the `spec.server.active` field. Scaling the cluster size up can be done at -will. Once the new server comes up, clients can be assigned to it -immediately. - -The CRD always eliminates the highest index servers first, in reverse -order from how they were started. Scaling down the cluster requires that -clients be migrated from servers that will be eliminated to others. That -process is currently a manual one and should be performed before -reducing the size of the cluster. diff --git a/Documentation/ceph-object-bucket-claim.md b/Documentation/ceph-object-bucket-claim.md deleted file mode 100644 index c94199171..000000000 --- a/Documentation/ceph-object-bucket-claim.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Object Bucket Claim -weight: 2850 -indent: true ---- - -# Ceph Object Bucket Claim - -Rook supports the creation of new buckets and access to existing buckets via two custom resources: - -- an `Object Bucket Claim (OBC)` is custom resource which requests a bucket (new or existing) and is described by a Custom Resource Definition (CRD) shown below. -- an `Object Bucket (OB)` is a custom resource automatically generated when a bucket is provisioned. It is a global resource, typically not visible to non-admin users, and contains information specific to the bucket. It is described by an OB CRD, also shown below. - -An OBC references a storage class which is created by an administrator. The storage class defines whether the bucket requested is a new bucket or an existing bucket. It also defines the bucket retention policy. -Users request a new or existing bucket by creating an OBC which is shown below. The ceph provisioner detects the OBC and creates a new bucket or grants access to an existing bucket, depending the the storage class referenced in the OBC. It also generates a Secret which provides credentials to access the bucket, and a ConfigMap which contains the bucket's endpoint. Application pods consume the information in the Secret and ConfigMap to access the bucket. Please note that to make provisioner watch the cluster namespace only you need to set `ROOK_OBC_WATCH_OPERATOR_NAMESPACE` to `true` in the operator manifest, otherwise it watches all namespaces. - -## Sample - -### OBC Custom Resource -```yaml -apiVersion: objectbucket.io/v1alpha1 -kind: ObjectBucketClaim -metadata: - name: ceph-bucket [1] - namespace: rook-ceph [2] -spec: - bucketName: [3] - generateBucketName: photo-booth [4] - storageClassName: rook-ceph-bucket [4] - additionalConfig: [5] - maxObjects: "1000" - maxSize: "2G" -``` -1. `name` of the `ObjectBucketClaim`. This name becomes the name of the Secret and ConfigMap. -1. `namespace`(optional) of the `ObjectBucketClaim`, which is also the namespace of the ConfigMap and Secret. -1. `bucketName` name of the `bucket`. -**Not** recommended for new buckets since names must be unique within -an entire object store. -1. `generateBucketName` value becomes the prefix for a randomly generated name, if supplied then `bucketName` must be empty. -If both `bucketName` and `generateBucketName` are supplied then `BucketName` has precedence and `GenerateBucketName` is ignored. -If both `bucketName` and `generateBucketName` are blank or omitted then the storage class is expected to contain the name of an _existing_ bucket. 
It's an error if all three bucket related names are blank or omitted. -1. `storageClassName` which defines the StorageClass which contains the names of the bucket provisioner, the object-store and specifies the bucket retention policy. -1. `additionalConfig` is an optional list of key-value pairs used to define attributes specific to the bucket being provisioned by this OBC. This information is typically tuned to a particular bucket provisioner and may limit application portability. Options supported: - - `maxObjects`: The maximum number of objects in the bucket - - `maxSize`: The maximum size of the bucket, please note minimum recommended value is 4K. - -### OBC Custom Resource after Bucket Provisioning -```yaml -apiVersion: objectbucket.io/v1alpha1 -kind: ObjectBucketClaim -metadata: - creationTimestamp: "2019-10-18T09:54:01Z" - generation: 2 - name: ceph-bucket - namespace: default [1] - resourceVersion: "559491" -spec: - ObjectBucketName: obc-default-ceph-bucket [2] - additionalConfig: null - bucketName: photo-booth-c1178d61-1517-431f-8408-ec4c9fa50bee [3] - storageClassName: rook-ceph-bucket [4] -status: - phase: Bound [5] -``` -1. `namespace` where OBC got created. -1. `ObjectBucketName` generated OB name created using name space and OBC name. -1. the generated (in this case), unique `bucket name` for the new bucket. -1. name of the storage class from OBC got created. -1. phases of bucket creation: - - _Pending_: the operator is processing the request. - - _Bound_: the operator finished processing the request and linked the OBC and OB - - _Released_: the OB has been deleted, leaving the OBC unclaimed but unavailable. - - _Failed_: not currently set. - -### App Pod -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: app-pod - namespace: dev-user -spec: - containers: - - name: mycontainer - image: redis - envFrom: [1] - - configMapRef: - name: ceph-bucket [2] - - secretRef: - name: ceph-bucket [3] -``` -1. use `env:` if mapping of the defined key names to the env var names used by the app is needed. -1. makes available to the pod as env variables: `BUCKET_HOST`, `BUCKET_PORT`, `BUCKET_NAME` -1. makes available to the pod as env variables: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` - -### StorageClass -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-bucket - labels: - aws-s3/object [1] -provisioner: rook-ceph.ceph.rook.io/bucket [2] -parameters: [3] - objectStoreName: my-store - objectStoreNamespace: rook-ceph - region: us-west-1 - bucketName: ceph-bucket [4] -reclaimPolicy: Delete [5] -``` -1. `label`(optional) here associates this `StorageClass` to a specific provisioner. -1. `provisioner` responsible for handling `OBCs` referencing this `StorageClass`. -1. **all** `parameter` required. -1. `bucketName` is required for access to existing buckets but is omitted when provisioning new buckets. -Unlike greenfield provisioning, the brownfield bucket name appears in the `StorageClass`, not the `OBC`. -1. rook-ceph provisioner decides how to treat the `reclaimPolicy` when an `OBC` is deleted for the bucket. See explanation as [specified in Kubernetes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#retain) -+ _Delete_ = physically delete the bucket. -+ _Retain_ = do not physically delete the bucket. 
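-
-Referring back to point 1 of the App Pod example above, here is a sketch of mapping the generated keys to application-specific variable names; the names `S3_ENDPOINT` and `S3_ACCESS_KEY` are hypothetical, while the ConfigMap/Secret name and keys come from the OBC sample:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: app-pod
-  namespace: dev-user
-spec:
-  containers:
-    - name: mycontainer
-      image: redis
-      env:
-        - name: S3_ENDPOINT
-          valueFrom:
-            configMapKeyRef:
-              name: ceph-bucket # ConfigMap created for the OBC
-              key: BUCKET_HOST
-        - name: S3_ACCESS_KEY
-          valueFrom:
-            secretKeyRef:
-              name: ceph-bucket # Secret created for the OBC
-              key: AWS_ACCESS_KEY_ID
-```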
diff --git a/Documentation/ceph-object-multisite-crd.md b/Documentation/ceph-object-multisite-crd.md deleted file mode 100644 index dff605920..000000000 --- a/Documentation/ceph-object-multisite-crd.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Object Multisite CRDs -weight: 2825 -indent: true ---- - -# Ceph Object Multisite CRDs - -The following CRDs enable Ceph object stores to isolate or replicate data via multisite. For more information on multisite, visit the [ceph-object-multisite](/Documentation/ceph-object-multisite.md) documentation. - -## Ceph Object Realm CRD - -Rook allows creation of a realm in a ceph cluster for object stores through the custom resource definitions (CRDs). The following settings are available for Ceph object store realms. - -### Sample - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectRealm -metadata: - name: realm-a - namespace: rook-ceph -# This endpoint in this section needs is an endpoint from the master zone in the master zone group of realm-a. See object-multisite.md for more details. -spec: - pull: - endpoint: http://10.2.105.133:80 -``` - -### Object Realm Settings - -#### Metadata - -* `name`: The name of the object realm to create -* `namespace`: The namespace of the Rook cluster where the object realm is created. - -#### Spec - -* `pull`: This optional section is for the pulling the realm for another ceph cluster. - * `endpoint`: The endpoint in the realm from another ceph cluster you want to pull from. This endpoint must be in the master zone of the master zone group of the realm. - -## Ceph Object Zone Group CRD - -Rook allows creation of zone groups in a ceph cluster for object stores through the custom resource definitions (CRDs). The following settings are available for Ceph object store zone groups. - -### Sample - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectZoneGroup -metadata: - name: zonegroup-a - namespace: rook-ceph -spec: - realm: realm-a -``` - -### Object Zone Group Settings - -#### Metadata - -* `name`: The name of the object zone group to create -* `namespace`: The namespace of the Rook cluster where the object zone group is created. - -#### Spec - -* `realm`: The object realm in which the zone group will be created. This matches the name of the object realm CRD. - -## Ceph Object Zone CRD - -Rook allows creation of zones in a ceph cluster for object stores through the custom resource definitions (CRDs). The following settings are available for Ceph object store zone. - -### Sample - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectZone -metadata: - name: zone-a - namespace: rook-ceph -spec: - zoneGroup: zonegroup-a - metadataPool: - failureDomain: host - replicated: - size: 3 - dataPool: - failureDomain: osd - erasureCoded: - dataChunks: 2 - codingChunks: 1 -``` - -### Object Zone Settings - -#### Metadata - -* `name`: The name of the object zone to create -* `namespace`: The namespace of the Rook cluster where the object zone is created. - -### Pools - -The pools allow all of the settings defined in the Pool CRD spec. For more details, see the [Pool CRD](ceph-pool-crd.md) settings. In the example above, there must be at least three hosts (size 3) and at least three devices (2 data + 1 coding chunks) in the cluster. - -#### Spec - -* `zonegroup`: The object zonegroup in which the zone will be created. This matches the name of the object zone group CRD. -* `metadataPool`: The settings used to create all of the object store metadata pools. Must use replication. 
-* `dataPool`: The settings to create the object store data pool. Can use replication or erasure coding. diff --git a/Documentation/ceph-object-multisite.md b/Documentation/ceph-object-multisite.md deleted file mode 100644 index 97e397141..000000000 --- a/Documentation/ceph-object-multisite.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -title: Object Multisite -weight: 2250 -indent: true ---- - -# Object Multisite - -Multisite is a feature of Ceph that allows object stores to replicate their data over multiple Ceph clusters. - -Multisite also allows object stores to be independent and isolated from other object stores in a cluster. - -When a ceph-object-store is created without the `zone` section; a realm, zone group, and zone is created with the same name as the ceph-object-store. - -Since it is the only ceph-object-store in the realm, the data in the ceph-object-store remain independent and isolated from others on the same cluster. - -When a ceph-object-store is created with the `zone` section, the ceph-object-store will join a custom created zone, zone group, and realm each with a different names than its own. - -This allows the ceph-object-store to replicate its data over multiple Ceph clusters. - -To review core multisite concepts please read the [ceph-multisite design overview](/design/ceph/object/ceph-multisite-overview.md). - -## Prerequisites - -This guide assumes a Rook cluster as explained in the [Quickstart](ceph-quickstart.md). - -# Creating Object Multisite - -If an admin wants to set up multisite on a Rook Ceph cluster, the admin should create: - -1. A [realm](/Documentation/ceph-object-multisite-crd.md#object-realm-settings) -1. A [zonegroup](/Documentation/ceph-object-multisite-crd.md#object-zone-group-settings) -1. A [zone](/Documentation/ceph-object-multisite-crd.md#object-zone-settings) -1. An [object-store](/Documentation/ceph-object-store-crd.md#zone-settings) with the `zone` section - -object-multisite.yaml in the [examples](/cluster/examples/kubernetes/ceph/) directory can be used to create the multisite CRDs. -```console -kubectl create -f object-multisite.yaml -``` - -The first zone group created in a realm is the master zone group. The first zone created in a zone group is the master zone. - -When a non-master zone or non-master zone group is created, the zone group or zone is not in the Ceph Radosgw Multisite [Period](https://docs.ceph.com/docs/master/radosgw/multisite/) until an object-store is created in that zone (and zone group). - -The zone will create the pools for the object-store(s) that are in the zone to use. - -When one of the multisite CRs (realm, zone group, zone) is deleted the underlying ceph realm/zone group/zone is not deleted, neither are the pools created by the zone. See the "Multisite Cleanup" section for more information. - -For more information on the multisite CRDs please read [ceph-object-multisite-crd](ceph-object-multisite-crd.md). - -# Pulling a Realm - -If an admin wants to sync data from another cluster, the admin needs to pull a realm on a Rook Ceph cluster from another Rook Ceph (or Ceph) cluster. - -To begin doing this, the admin needs 2 pieces of information: - -1. An endpoint from the realm being pulled from -1. The access key and the system key of the system user from the realm being pulled from. - -## Getting the Pull Endpoint - -To pull a Ceph realm from a remote Ceph cluster, an `endpoint` must be added to the CephObjectRealm's `pull` section in the `spec`. 
This endpoint must be from the master zone in the master zone group of that realm. - -If an admin does not know of an endpoint that fits this criteria, the admin can find such an endpoint on the remote Ceph cluster (via the tool box if it is a Rook Ceph Cluster) by running: - -``` -$ radosgw-admin zonegroup get --rgw-realm=$REALM_NAME --rgw-zonegroup=$MASTER_ZONEGROUP_NAME -``` ->``` ->{ -> ... -> "endpoints": [http://10.17.159.77:80], -> ... ->} ->``` - -A list of endpoints in the master zone group in the master zone is in the `endpoints` section of the JSON output of the `zonegoup get` command. - -This endpoint must also be resolvable from the new Rook Ceph cluster. To test this run the `curl` command on the endpoint: - -``` -$ curl -L http://10.17.159.77:80 -``` ->``` ->anonymous ->``` - -Finally add the endpoint to the `pull` section of the CephObjectRealm's spec. The CephObjectRealm should have the same name as the CephObjectRealm/Ceph realm it is pulling from. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectRealm -metadata: - name: realm-a - namespace: rook-ceph -spec: - pull: - endpoint: http://10.17.159.77:80 -``` - -## Getting Realm Access Key and Secret Key - -The access key and secret key of the system user are keys that allow other Ceph clusters to pull the realm of the system user. - -### Getting the Realm Access Key and Secret Key from the Rook Ceph Cluster - -When an admin creates a ceph-object-realm a system user automatically gets created for the realm with an access key and a secret key. - -This system user has the name "$REALM_NAME-system-user". For the example realm, the uid for the system user is "realm-a-system-user". - -These keys for the user are exported as a kubernetes [secret](https://kubernetes.io/docs/concepts/configuration/secret/) called "$REALM_NAME-keys" (ex: realm-a-keys). - -To get these keys from the cluster the realm was originally created on, run: -```console -$ kubectl -n $ORIGINAL_CLUSTER_NAMESPACE get secrets realm-a-keys -o yaml > realm-a-keys.yaml -``` -Edit the `realm-a-keys.yaml` file, and change the `namespace` with the namespace that the new Rook Ceph cluster exists in. - -Then create a kubernetes secret on the pulling Rook Ceph cluster with the same secrets yaml file. -```console -kubectl create -f realm-a-keys.yaml -``` - -### Getting the Realm Access Key and Secret Key from a Non Rook Ceph Cluster - -The access key and the secret key of the system user can be found in the output of running the following command on a non-rook ceph cluster: -``` -radosgw-admin user info --uid="realm-a-system-user" -``` ->```{ -> ... -> "keys": [ -> { -> "user": "realm-a-system-user" -> "access_key": "aSw4blZIKV9nKEU5VC0=" -> "secret_key": "JSlDXFt5TlgjSV9QOE9XUndrLiI5JEo9YDBsJg==", -> } -> ], -> ... ->} ->``` - -Then base64 encode the each of the keys and create a `.yaml` file for the Kubernetes secret from the following template. - -Only the `access-key`, `secret-key`, and `namespace` sections need to be replaced. -```yaml -apiVersion: v1 -data: - access-key: YVN3NGJsWklLVjluS0VVNVZDMD0= - secret-key: SlNsRFhGdDVUbGdqU1Y5UU9FOVhVbmRyTGlJNUpFbzlZREJzSmc9PQ== -kind: Secret -metadata: - name: realm-a-keys - namespace: $NEW_ROOK_CLUSTER_NAMESPACE -type: kubernetes.io/rook -``` - -Finally, create a kubernetes secret on the pulling Rook Ceph cluster with the new secrets yaml file. 
-```console -kubectl create -f realm-a-keys.yaml -``` - -### Pulling a Realm on a New Rook Ceph Cluster - -Once the admin knows the endpoint and the secret for the keys has been created, the admin should create: - -1. A [CephObjectRealm](/design/ceph/object/realm.md) matching to the realm on the other Ceph cluster, with an endpoint as described above. -1. A [CephObjectZoneGroup](/design/ceph/object/zone-group.md) matching the master zone group name or the master CephObjectZoneGroup from the cluster the the realm was pulled from. -1. A [CephObjectZone](/design/ceph/object/zone.md) referring to the CephObjectZoneGroup created above. -1. A [CephObjectStore](/design/ceph/object/store.md) referring to the new CephObjectZone resource. - -object-multisite-pull-realm.yaml (with changes) in the [examples](/cluster/examples/kubernetes/ceph/) directory can be used to create the multisite CRDs. -```console -kubectl create -f object-multisite-pull-realm.yaml -``` - -# Multisite Cleanup - -Multisite configuration must be cleaned up by hand. Deleting a realm/zone group/zone CR will not delete the underlying Ceph realm, zone group, zone, or the pools associated with a zone. - -## Realm Deletion - -Changes made to the resource's configuration or deletion of the resource are not reflected on the Ceph cluster. - -When the ceph-object-realm resource is deleted or modified, the realm is not deleted from the Ceph cluster. Realm deletion must be done via the toolbox. - -### Deleting a Realm - -The Rook toolbox can modify the Ceph Multisite state via the radosgw-admin command. - -The following command, run via the toolbox, deletes the realm. - -```console -radosgw-admin realm delete --rgw-realm=realm-a -``` - -## Zone Group Deletion - -Changes made to the resource's configuration or deletion of the resource are not reflected on the Ceph cluster. - -When the ceph-object-zone group resource is deleted or modified, the zone group is not deleted from the Ceph cluster. Zone Group deletion must be done through the toolbox. - -### Deleting a Zone Group - -The Rook toolbox can modify the Ceph Multisite state via the radosgw-admin command. - -The following command, run via the toolbox, deletes the zone group. - -```console -radosgw-admin zonegroup delete --rgw-realm=realm-a --rgw-zonegroup=zone-group-a -radosgw-admin period update --commit --rgw-realm=realm-a --rgw-zonegroup=zone-group-a -``` - -## Deleting and Reconfiguring the Ceph Object Zone - -Changes made to the resource's configuration or deletion of the resource are not reflected on the Ceph cluster. - -When the ceph-object-zone resource is deleted or modified, the zone is not deleted from the Ceph cluster. Zone deletion must be done through the toolbox. - -### Changing the Master Zone - -The Rook toolbox can change the master zone in a zone group. - -```console -radosgw-admin zone modify --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --rgw-zone=zone-a --master -radosgw-admin zonegroup modify --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --master -radosgw-admin period update --commit --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --rgw-zone=zone-a -``` - -### Deleting Zone - -The Rook toolbox can modify the Ceph Multisite state via the radosgw-admin command. - -There are two scenarios possible when deleting a zone. -The following commands, run via the toolbox, deletes the zone if there is only one zone in the zone group. 
- -```console -radosgw-admin zone delete --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --rgw-zone=zone-a -radosgw-admin period update --commit --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --rgw-zone=zone-a -``` - -In the other scenario, there are more than one zones in a zone group. - -Care must be taken when changing which zone is the master zone. - -Please read the following [documentation](https://docs.ceph.com/docs/master/radosgw/multisite/#changing-the-metadata-master-zone) before running the below commands: - -The following commands, run via toolboxes, remove the zone from the zone group first, then delete the zone. - -```console -radosgw-admin zonegroup rm --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --rgw-zone=zone-a -radosgw-admin period update --commit --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --rgw-zone=zone-a -radosgw-admin zone delete --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --rgw-zone=zone-a -radosgw-admin period update --commit --rgw-realm=realm-a --rgw-zonegroup=zone-group-a --rgw-zone=zone-a -``` - -When a zone is deleted, the pools for that zone are not deleted. - -### Deleting Pools for a Zone - -The Rook toolbox can delete pools. Deleting pools should be done with caution. - -The following [documentation](https://docs.ceph.com/docs/master/rados/operations/pools/) on pools should be read before deleting any pools. - -When a zone is created the following pools are created for each zone: -``` -$ZONE_NAME.rgw.control -$ZONE_NAME.rgw.meta -$ZONE_NAME.rgw.log -$ZONE_NAME.rgw.buckets.index -$ZONE_NAME.rgw.buckets.non-ec -$ZONE_NAME.rgw.buckets.data -``` -Here is an example command to delete the .rgw.buckets.data pool for zone-a. - -```console -ceph osd pool rm zone-a.rgw.buckets.data zone-a.rgw.buckets.data --yes-i-really-really-mean-it -``` - -In this command the pool name **must** be mentioned twice for the pool to be removed. - -### Removing an Object Store from a Zone - -When an object-store (created in a zone) is deleted, the endpoint for that object store is removed from that zone, via -```console -kubectl delete -f object-store.yaml -``` - -Removing object store(s) from the master zone of the master zone group should be done with caution. When all of these object-stores are deleted the period cannot be updated and that realm cannot be pulled. diff --git a/Documentation/ceph-object-store-crd.md b/Documentation/ceph-object-store-crd.md deleted file mode 100644 index ee7c40fca..000000000 --- a/Documentation/ceph-object-store-crd.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: Object Store CRD -weight: 2800 -indent: true ---- - -# Ceph Object Store CRD - -Rook allows creation and customization of object stores through the custom resource definitions (CRDs). The following settings are available for Ceph object stores. - -## Sample - -### Erasure Coded - -Erasure coded pools can only be used with `dataPools`. The `metadataPool` must use a replicated pool. - -> **NOTE**: This sample requires *at least 3 bluestore OSDs*, with each OSD located on a *different node*. - -The OSDs must be located on different nodes, because the [`failureDomain`](ceph-pool-crd.md#spec) is set to `host` and the `erasureCoded` chunk settings require at least 3 different OSDs (2 `dataChunks` + 1 `codingChunks`). 
- -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: my-store - namespace: rook-ceph -spec: - metadataPool: - failureDomain: host - replicated: - size: 3 - dataPool: - failureDomain: host - erasureCoded: - dataChunks: 2 - codingChunks: 1 - preservePoolsOnDelete: true - gateway: - # sslCertificateRef: - port: 80 - # securePort: 443 - instances: 1 - # A key/value list of annotations - annotations: - # key: value - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - rgw-node - # tolerations: - # - key: rgw-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # topologySpreadConstraints: - resources: - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - #zone: - #name: zone-a -``` - -## Object Store Settings - -### Metadata - -* `name`: The name of the object store to create, which will be reflected in the pool and other resource names. -* `namespace`: The namespace of the Rook cluster where the object store is created. - -### Pools - -The pools allow all of the settings defined in the Pool CRD spec. For more details, see the [Pool CRD](ceph-pool-crd.md) settings. In the example above, there must be at least three hosts (size 3) and at least three devices (2 data + 1 coding chunks) in the cluster. - -When the `zone` section is set pools with the object stores name will not be created since the object-store will the using the pools created by the ceph-object-zone. - -* `metadataPool`: The settings used to create all of the object store metadata pools. Must use replication. -* `dataPool`: The settings to create the object store data pool. Can use replication or erasure coding. -* `preservePoolsOnDelete`: If it is set to 'true' the pools used to support the object store will remain when the object store will be deleted. This is a security measure to avoid accidental loss of data. It is set to 'false' by default. If not specified is also deemed as 'false'. - -## Gateway Settings - -The gateway settings correspond to the RGW daemon settings. - -* `type`: `S3` is supported -* `sslCertificateRef`: If specified, this is the name of the Kubernetes secret(`opaque` or `tls` type) that contains the TLS certificate to be used for secure connections to the object store. Rook will look in the secret provided at the `cert` key name. The value of the `cert` key must be in the format expected by the [RGW service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb): "The server key, server certificate, and any other CA or intermediate certificates be supplied in one file. Each of these items must be in PEM form." -* `port`: The port on which the Object service will be reachable. If host networking is enabled, the RGW daemons will also listen on that port. If running on SDN, the RGW daemon listening port will be 8080 internally. -* `securePort`: The secure port on which RGW pods will be listening. A TLS certificate must be specified either via `sslCerticateRef` or `service.annotations` -* `instances`: The number of pods that will be started to load balance this object store. -* `externalRgwEndpoints`: A list of IP addresses to connect to external existing Rados Gateways (works with external mode). This setting will be ignored if the `CephCluster` does not have `external` spec enabled. 
Refer to the [external cluster section](ceph-cluster-crd.md#external-cluster) for more details. -* `annotations`: Key value pair list of annotations to add. -* `labels`: Key value pair list of labels to add. -* `placement`: The Kubernetes placement settings to determine where the RGW pods should be started in the cluster. -* `resources`: Set resource requests/limits for the Gateway Pod(s), see [Resource Requirements/Limits](ceph-cluster-crd.md#resource-requirementslimits). -* `priorityClassName`: Set priority class name for the Gateway Pod(s) -* `service`: The annotations to set on to the Kubernetes Service of RGW. The [service serving cert](https://docs.openshift.com/container-platform/4.6/security/certificates/service-serving-certificate.html) feature supported in Openshift is enabled by the following example: -```yaml -gateway: - service: - annotations: - service.beta.openshift.io/serving-cert-secret-name: -``` - -Example of external rgw endpoints to connect to: - -```yaml -gateway: - port: 80 - externalRgwEndpoints: - - ip: 192.168.39.182 -``` - -This will create a service with the endpoint `192.168.39.182` on port `80`, pointing to the Ceph object external gateway. -All the other settings from the gateway section will be ignored, except for `securePort`. - -## Zone Settings - -The [zone](ceph-object-multisite.md) settings allow the object store to join custom created [ceph-object-zone](ceph-object-multisite-crd.md). - -* `name`: the name of the ceph-object-zone the object store will be in. - -## Runtime settings - -### MIME types - -Rook provides a default `mime.types` file for each Ceph object store. This file is stored in a -Kubernetes ConfigMap with the name `rook-ceph-rgw--mime-types`. For most users, the -default file should suffice, however, the option is available to users to edit the `mime.types` -file in the ConfigMap as they desire. Users may have their own special file types, and particularly -security conscious users may wish to pare down the file to reduce the possibility of a file type -execution attack. - -Rook will not overwrite an existing `mime.types` ConfigMap so that user modifications will not be -destroyed. If the object store is destroyed and recreated, the ConfigMap will also be destroyed and -created anew. - -## Health settings - -Rook-Ceph will be default monitor the state of the object store endpoints. -The following CRD settings are available: - -* `healthCheck`: main object store health monitoring section - -Here is a complete example: - -```yaml -healthCheck: - bucket: - disabled: false - interval: 60s -``` - -The endpoint health check procedure is the following: - -1. Create an S3 user -2. Create a bucket with that user -3. PUT the file in the object store -4. GET the file from the object store -5. Verify object consistency -6. Update CR health status check - -Rook-Ceph always keeps the bucket and the user for the health check, it just does a PUT and GET of an s3 object since creating a bucket is an expensive operation. - -## Security settings - -Ceph RGW supports encryption via Key Management System (KMS) using HashiCorp Vault. Refer to the [vault kms section](ceph-cluster-crd.md#vault-kms) for detailed explanation. -If these settings are defined, then RGW establish a connection between Vault and whenever S3 client sends a request with Server Side Encryption, -it encrypts that using the key specified by the client. 
For more details w.r.t RGW, please refer [Ceph Vault documentation](https://docs.ceph.com/en/latest/radosgw/vault/) - -The `security` section contains settings related to KMS encryption of the RGW. - -```yaml -security: - kms: - connectionDetails: - KMS_PROVIDER: vault - VAULT_ADDR: http://vault.default.svc.cluster.local:8200 - VAULT_BACKEND_PATH: rgw - VAULT_SECRET_ENGINE: kv - VAULT_BACKEND: v2 - # name of the k8s secret containing the kms authentication token - tokenSecretName: rgw-vault-token -``` - -For RGW, please note the following: - -* `VAULT_SECRET_ENGINE` option is specifically for RGW to mention about the secret engine which can be used, currently supports two: [kv](https://www.vaultproject.io/docs/secrets/kv) and [transit](https://www.vaultproject.io/docs/secrets/transit). And for kv engine only version 2 is supported. -* The Storage administrator needs to create a secret in the Vault server so that S3 clients use that key for encryption -$ vault kv put rook/ key=$(openssl rand -base64 32) # kv engine -$ vault write -f transit/keys/ exportable=true # transit engine - -* TLS authentication with custom certs between Vault and RGW are yet to be supported. - -## Deleting a CephObjectStore - -During deletion of a CephObjectStore resource, Rook protects against accidental or premature -destruction of user data by blocking deletion if there are any object buckets in the object store -being deleted. Buckets may have been created by users or by ObjectBucketClaims. - -For deletion to be successful, all buckets in the object store must be removed. This may require -manual deletion or removal of all ObjectBucketClaims. Alternately, the -`cephobjectstore.ceph.rook.io` finalizer on the CephObjectStore can be removed to remove the -Kubernetes Custom Resource, but the Ceph pools which store the data will not be removed in this case. - -Rook will warn about which buckets are blocking deletion in three ways: -1. An event will be registered on the CephObjectStore resource -1. A status condition will be added to the CephObjectStore resource -1. An error will be added to the Rook-Ceph Operator log diff --git a/Documentation/ceph-object-store-user-crd.md b/Documentation/ceph-object-store-user-crd.md deleted file mode 100644 index 34a77d45c..000000000 --- a/Documentation/ceph-object-store-user-crd.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Object Store User CRD -weight: 2900 -indent: true ---- - -# Ceph Object Store User CRD - -Rook allows creation and customization of object store users through the custom resource definitions (CRDs). The following settings are available -for Ceph object store users. - -## Sample - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectStoreUser -metadata: - name: my-user - namespace: rook-ceph -spec: - store: my-store - displayName: my-display-name -``` - -## Object Store User Settings - -### Metadata - -* `name`: The name of the object store user to create, which will be reflected in the secret and other resource names. -* `namespace`: The namespace of the Rook cluster where the object store user is created. - -### Spec - -* `store`: The object store in which the user will be created. This matches the name of the objectstore CRD. -* `displayName`: The display name which will be passed to the `radosgw-admin user create` command. 
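-
-Once the user is reconciled, the operator stores its S3 credentials in a Kubernetes Secret. Here is a sketch of reading them back for the sample above; the Secret name pattern `rook-ceph-object-user-<store>-<user>` and the `AccessKey`/`SecretKey` data keys are assumptions based on typical Rook naming:
-
-```console
-kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user \
-  -o jsonpath='{.data.AccessKey}' | base64 --decode
-kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user \
-  -o jsonpath='{.data.SecretKey}' | base64 --decode
-```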
diff --git a/Documentation/ceph-object.md b/Documentation/ceph-object.md deleted file mode 100644 index 6a9270425..000000000 --- a/Documentation/ceph-object.md +++ /dev/null @@ -1,378 +0,0 @@ ---- -title: Object Storage -weight: 2200 -indent: true ---- - -# Object Storage - -Object storage exposes an S3 API to the storage cluster for applications to put and get data. - -## Prerequisites - -This guide assumes a Rook cluster as explained in the [Quickstart](ceph-quickstart.md). - -## Configure an Object Store - -Rook has the ability to either deploy an object store in Kubernetes or to connect to an external RGW service. -Most commonly, the object store will be configured locally by Rook. -Alternatively, if you have an existing Ceph cluster with Rados Gateways, see the -[external section](#connect-to-an-external-object-store) to consume it from Rook. - -### Create a Local Object Store - -The below sample will create a `CephObjectStore` that starts the RGW service in the cluster with an S3 API. - -> **NOTE**: This sample requires *at least 3 bluestore OSDs*, with each OSD located on a *different node*. - -The OSDs must be located on different nodes, because the [`failureDomain`](ceph-pool-crd.md#spec) is set to `host` and the `erasureCoded` chunk settings require at least 3 different OSDs (2 `dataChunks` + 1 `codingChunks`). - -See the [Object Store CRD](ceph-object-store-crd.md#object-store-settings), for more detail on the settings available for a `CephObjectStore`. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: my-store - namespace: rook-ceph -spec: - metadataPool: - failureDomain: host - replicated: - size: 3 - dataPool: - failureDomain: host - erasureCoded: - dataChunks: 2 - codingChunks: 1 - preservePoolsOnDelete: true - gateway: - sslCertificateRef: - port: 80 - # securePort: 443 - instances: 1 - healthCheck: - bucket: - disabled: false - interval: 60s -``` - -After the `CephObjectStore` is created, the Rook operator will then create all the pools and other resources necessary to start the service. This may take a minute to complete. - -```console -# Create the object store -kubectl create -f object.yaml - -# To confirm the object store is configured, wait for the rgw pod to start -kubectl -n rook-ceph get pod -l app=rook-ceph-rgw -``` - -### Connect to an External Object Store - -Rook can connect to existing RGW gateways to work in conjunction with the external mode of the `CephCluster` CRD. -If you have an external `CephCluster` CR, you can instruct Rook to consume external gateways with the following: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: external-store - namespace: rook-ceph -spec: - gateway: - port: 8080 - externalRgwEndpoints: - - ip: 192.168.39.182 - healthCheck: - bucket: - enabled: true - interval: 60s -``` - -You can use the existing `object-external.yaml` file. 
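-
-Assuming the example file is used as-is (adjust the endpoint for your environment first), creating the external store is a single command:
-
-```console
-kubectl create -f object-external.yaml
-```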
-When ready the ceph-object-controller will output a message in the Operator log similar to this one: - ->``` ->ceph-object-controller: ceph object store gateway service >running at 10.100.28.138:8080 ->``` - -You can now get and access the store via: - -```console -kubectl -n rook-ceph get svc -l app=rook-ceph-rgw -``` - ->``` ->NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ->rook-ceph-rgw-my-store ClusterIP 10.100.28.138 8080/TCP 6h59m ->``` - -Any pod from your cluster can now access this endpoint: - -```console -$ curl 10.100.28.138:8080 -``` - ->``` ->anonymous ->``` - -It is also possible to use the internally registered DNS name: - -```console -curl rook-ceph-rgw-my-store.rook-ceph:8080 -``` - -```console -anonymous -``` - -The DNS name is created with the following schema `rook-ceph-rgw-$STORE_NAME.$NAMESPACE`. - -## Create a Bucket - -Now that the object store is configured, next we need to create a bucket where a client can read and write objects. A bucket can be created by defining a storage class, similar to the pattern used by block and file storage. -First, define the storage class that will allow object clients to create a bucket. -The storage class defines the object storage system, the bucket retention policy, and other properties required by the administrator. Save the following as `storageclass-bucket-delete.yaml` (the example is named as such due to the `Delete` reclaim policy). - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-bucket -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.ceph.rook.io/bucket -reclaimPolicy: Delete -parameters: - objectStoreName: my-store - objectStoreNamespace: rook-ceph - region: us-east-1 -``` -If you’ve deployed the Rook operator in a namespace other than `rook-ceph`, change the prefix in the provisioner to match the namespace you used. For example, if the Rook operator is running in the namespace `my-namespace` the provisioner value should be `my-namespace.ceph.rook.io/bucket`. -```console -kubectl create -f storageclass-bucket-delete.yaml -``` - -Based on this storage class, an object client can now request a bucket by creating an Object Bucket Claim (OBC). -When the OBC is created, the Rook-Ceph bucket provisioner will create a new bucket. Notice that the OBC -references the storage class that was created above. -Save the following as `object-bucket-claim-delete.yaml` (the example is named as such due to the `Delete` reclaim policy): - -```yaml -apiVersion: objectbucket.io/v1alpha1 -kind: ObjectBucketClaim -metadata: - name: ceph-bucket -spec: - generateBucketName: ceph-bkt - storageClassName: rook-ceph-bucket -``` - -```console -kubectl create -f object-bucket-claim-delete.yaml -``` - -Now that the claim is created, the operator will create the bucket as well as generate other artifacts to enable access to the bucket. A secret and ConfigMap are created with the same name as the OBC and in the same namespace. -The secret contains credentials used by the application pod to access the bucket. -The ConfigMap contains bucket endpoint information and is also consumed by the pod. -See the [Object Bucket Claim Documentation](ceph-object-bucket-claim.md) for more details on the `CephObjectBucketClaims`. 
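-
-Because the ConfigMap and Secret share the OBC's name, an application pod can also pull the entire bucket configuration in with `envFrom`. The following is only a sketch (the pod name and image are placeholders), assuming the `ceph-bucket` claim above was created in the `default` namespace:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: bucket-client        # hypothetical consumer pod
-  namespace: default
-spec:
-  containers:
-    - name: app
-      image: busybox         # placeholder image
-      command: ["sleep", "3600"]
-      envFrom:
-        # ConfigMap created by the OBC: bucket name, host, port, etc.
-        - configMapRef:
-            name: ceph-bucket
-        # Secret created by the OBC: AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
-        - secretRef:
-            name: ceph-bucket
-```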
- -### Client Connections - -The following commands extract key pieces of information from the secret and configmap: - -```bash -# The ConfigMap, Secret, and OBC will be part of the default namespace if no specific namespace is mentioned -export AWS_HOST=$(kubectl -n default get cm ceph-bucket -o jsonpath='{.data.BUCKET_HOST}') -export AWS_ACCESS_KEY_ID=$(kubectl -n default get secret ceph-bucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode) -export AWS_SECRET_ACCESS_KEY=$(kubectl -n default get secret ceph-bucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode) -``` - -## Consume the Object Storage - -Now that you have the object store configured and a bucket created, you can consume the -object storage from an S3 client. - -This section will guide you through testing the connection to the `CephObjectStore` and uploading and downloading from it. -Run the following commands after you have connected to the [Rook toolbox](ceph-toolbox.md). - -### Connection Environment Variables - -To simplify the S3 client commands, you will want to set the four environment variables for use by your client (i.e. inside the toolbox). -See above for retrieving the variables for a bucket created by an `ObjectBucketClaim`. - -```bash -export AWS_HOST= -export AWS_ENDPOINT= -export AWS_ACCESS_KEY_ID= -export AWS_SECRET_ACCESS_KEY= -``` - -* `Host`: The DNS host name where the rgw service is found in the cluster. Assuming you are using the default `rook-ceph` cluster, it will be `rook-ceph-rgw-my-store.rook-ceph`. -* `Endpoint`: The endpoint where the rgw service is listening. Run `kubectl -n rook-ceph get svc rook-ceph-rgw-my-store`, then combine the clusterIP and the port. -* `Access key`: The user's `access_key` as printed above -* `Secret key`: The user's `secret_key` as printed above - -The variables for the user generated in this example might be: - -```bash -export AWS_HOST=rook-ceph-rgw-my-store.rook-ceph -export AWS_ENDPOINT=10.104.35.31:80 -export AWS_ACCESS_KEY_ID=XEZDB3UJ6X7HVBE7X7MA -export AWS_SECRET_ACCESS_KEY=7yGIZON7EhFORz0I40BFniML36D2rl8CQQ5kXU6l -``` - -The access key and secret key can be retrieved as described in the section above on [client connections](#client-connections) or -below in the section [creating a user](#create-a-user) if you are not creating the buckets with an `ObjectBucketClaim`. - -### Install s3cmd - -To test the `CephObjectStore` we will install the `s3cmd` tool into the toolbox pod. - -```console -yum --assumeyes install s3cmd -``` - -### PUT or GET an object - -Upload a file to the newly created bucket: - -```console -echo "Hello Rook" > /tmp/rookObj -s3cmd put /tmp/rookObj --no-ssl --host=${AWS_HOST} --host-bucket= s3://rookbucket -``` - -Download and verify the file from the bucket: - -```console -s3cmd get s3://rookbucket/rookObj /tmp/rookObj-download --no-ssl --host=${AWS_HOST} --host-bucket= -cat /tmp/rookObj-download -``` - -## Access External to the Cluster - -Rook sets up the object storage so pods will have access internal to the cluster. If your applications are running outside the cluster, -you will need to set up an external service through a `NodePort`. - -First, note the service that exposes RGW internal to the cluster. We will leave this service intact and create a new service for external access.
- -```console -kubectl -n rook-ceph get service rook-ceph-rgw-my-store -``` - ->``` ->NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE ->rook-ceph-rgw-my-store 10.3.0.177 80/TCP 2m ->``` - -Save the external service as `rgw-external.yaml`: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: rook-ceph-rgw-my-store-external - namespace: rook-ceph - labels: - app: rook-ceph-rgw - rook_cluster: rook-ceph - rook_object_store: my-store -spec: - ports: - - name: rgw - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: rook-ceph-rgw - rook_cluster: rook-ceph - rook_object_store: my-store - sessionAffinity: None - type: NodePort -``` - -Now create the external service. - -```console -kubectl create -f rgw-external.yaml -``` - -See both rgw services running and notice what port the external service is running on: - -```console -kubectl -n rook-ceph get service rook-ceph-rgw-my-store rook-ceph-rgw-my-store-external -``` - ->``` ->NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ->rook-ceph-rgw-my-store ClusterIP 10.104.82.228 80/TCP 4m ->rook-ceph-rgw-my-store-external NodePort 10.111.113.237 80:31536/TCP 39s ->``` - -Internally the rgw service is running on port `80`. The external port in this case is `31536`. Now you can access the `CephObjectStore` from anywhere! All you need is the hostname for any machine in the cluster, the external port, and the user credentials. - -## Create a User - -If you need to create an independent set of user credentials to access the S3 endpoint, -create a `CephObjectStoreUser`. The user will be used to connect to the RGW service in the cluster using the S3 API. -The user will be independent of any object bucket claims that you might have created in the earlier -instructions in this document. - -See the [Object Store User CRD](ceph-object-store-user-crd.md) for more detail on the settings available for a `CephObjectStoreUser`. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectStoreUser -metadata: - name: my-user - namespace: rook-ceph -spec: - store: my-store - displayName: "my display name" -``` - -When the `CephObjectStoreUser` is created, the Rook operator will then create the RGW user on the specified `CephObjectStore` and store the Access Key and Secret Key in a kubernetes secret in the same namespace as the `CephObjectStoreUser`. - -```console -# Create the object store user -kubectl create -f object-user.yaml -``` - -```console -# To confirm the object store user is configured, describe the secret -kubectl -n rook-ceph describe secret rook-ceph-object-user-my-store-my-user -``` - ->``` ->Name: rook-ceph-object-user-my-store-my-user ->Namespace: rook-ceph ->Labels: app=rook-ceph-rgw -> rook_cluster=rook-ceph -> rook_object_store=my-store ->Annotations: -> ->Type: kubernetes.io/rook -> ->Data ->==== ->AccessKey: 20 bytes ->SecretKey: 40 bytes ->``` - -The AccessKey and SecretKey data fields can be mounted in a pod as an environment variable. More information on consuming -kubernetes secrets can be found in the [K8s secret documentation](https://kubernetes.io/docs/concepts/configuration/secret/) - -To directly retrieve the secrets: - -```console -kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user -o jsonpath='{.data.AccessKey}' | base64 --decode -kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user -o jsonpath='{.data.SecretKey}' | base64 --decode -``` - -## Object Multisite - -Multisite is a feature of Ceph that allows object stores to replicate its data over multiple Ceph clusters. 
- -Multisite also allows object stores to be independent and isolated from other object stores in a cluster. - -For more information on multisite, please read the [ceph multisite overview](ceph-object-multisite.md) for how to run it. diff --git a/Documentation/ceph-openshift-issues.md b/Documentation/ceph-openshift-issues.md deleted file mode 100644 index d637e1b63..000000000 --- a/Documentation/ceph-openshift-issues.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: OpenShift Common Issues -weight: 11400 -indent: true ---- - -# OpenShift Common Issues - -## Enable Monitoring in the Storage Dashboard - -OpenShift Console uses OpenShift Prometheus for monitoring and populating data in the Storage Dashboard. Additional configuration is required to monitor the Ceph Cluster from the storage dashboard. - -1. Change the monitoring namespace to `openshift-monitoring` - - Change the namespace of the RoleBinding `rook-ceph-metrics` from `rook-ceph` to `openshift-monitoring` for the `prometheus-k8s` ServiceAccount in [rbac.yaml](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/monitoring/rbac.yaml#L70). - -``` -subjects: -- kind: ServiceAccount - name: prometheus-k8s - namespace: openshift-monitoring -``` - -2. Enable Ceph Cluster monitoring - - Follow [ceph-monitoring/prometheus-alerts](ceph-monitoring.md#prometheus-alerts). - -3. Set the required label on the namespace - - `oc label namespace rook-ceph "openshift.io/cluster-monitoring=true"` - -## Troubleshoot Monitoring Issues - -> **Pre-req:** Switch to the `rook-ceph` namespace with `oc project rook-ceph` - -1. Ensure the ceph-mgr pod is Running - - ```console - oc get pods -l app=rook-ceph-mgr - ``` - - >``` - >NAME READY STATUS RESTARTS AGE - >rook-ceph-mgr 1/1 Running 0 14h - >``` - -2. Ensure the service monitor is present - - ```console - oc get servicemonitor rook-ceph-mgr - ``` - - >``` - >NAME AGE - >rook-ceph-mgr 14h - >``` - -3. Ensure the prometheus rules are present - - ```console - oc get prometheusrules -l prometheus=rook-prometheus - ``` - - >``` - >NAME AGE - >prometheus-ceph-rules 14h - >``` diff --git a/Documentation/ceph-openshift.md b/Documentation/ceph-openshift.md deleted file mode 100644 index 5c5497838..000000000 --- a/Documentation/ceph-openshift.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: OpenShift -weight: 2060 -indent: true ---- -{% include_relative branch.liquid %} - -# OpenShift - -[OpenShift](https://www.openshift.com/) adds a number of security and other enhancements to Kubernetes. In particular, [security context constraints](https://blog.openshift.com/understanding-service-accounts-sccs/) allow the cluster admin to define exactly which permissions are allowed to pods running in the cluster. You will need to define those permissions that allow the Rook pods to run.
- -The settings for Rook in OpenShift are described below, and are also included in the [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph): - -* `operator-openshift.yaml`: Creates the security context constraints and starts the operator deployment -* `object-openshift.yaml`: Creates an object store with rgw listening on a valid port number for OpenShift - -## TL;DR - -To create an OpenShift cluster, the commands basically include: - -```console -oc create -f crds.yaml -f common.yaml -oc create -f operator-openshift.yaml -oc create -f cluster.yaml -``` - -## Rook Privileges - -To orchestrate the storage platform, Rook requires the following access in the cluster: - -* Create `hostPath` volumes, for persistence by the Ceph mon and osd pods -* Run pods in `privileged` mode, for access to `/dev` and `hostPath` volumes -* Host networking for the Rook agent and clusters that require host networking -* Ceph OSDs require host PIDs for communication on the same node - -## Security Context Constraints - -Before starting the Rook operator or cluster, create the security context constraints needed by the Rook pods. The following yaml is found in `operator-openshift.yaml` under `/cluster/examples/kubernetes/ceph`. - -> **NOTE**: Older versions of OpenShift may require `apiVersion: v1`. - -```yaml -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-ceph -allowPrivilegedContainer: true -allowHostNetwork: true -allowHostDirVolumePlugin: true -priority: -allowedCapabilities: [] -allowHostPorts: false -allowHostPID: true -allowHostIPC: false -readOnlyRootFilesystem: false -requiredDropCapabilities: [] -defaultAddCapabilities: [] -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -fsGroup: - type: MustRunAs -supplementalGroups: - type: RunAsAny -allowedFlexVolumes: - - driver: "ceph.rook.io/rook" - - driver: "ceph.rook.io/rook-ceph" -volumes: - - configMap - - downwardAPI - - emptyDir - - flexVolume - - hostPath - - persistentVolumeClaim - - projected - - secret -users: - # A user needs to be added for each rook service account. - # This assumes running in the default sample "rook-ceph" namespace. - # If other namespaces or service accounts are configured, they need to be updated here. - - system:serviceaccount:rook-ceph:rook-ceph-system - - system:serviceaccount:rook-ceph:default - - system:serviceaccount:rook-ceph:rook-ceph-mgr - - system:serviceaccount:rook-ceph:rook-ceph-osd -``` - -Important to note is that if you plan on running Rook in namespaces other than the default `rook-ceph`, the example scc will need to be modified to accommodate for your namespaces where the Rook pods are running. - -To create the scc you will need a privileged account: - -```console -oc login -u system:admin -``` - -We will create the security context constraints with the operator in the next section. - -## Rook Settings - -There are some Rook settings that also need to be adjusted to work in OpenShift. - -### Operator Settings - -There is an environment variable that needs to be set in the operator spec that will allow Rook to run in OpenShift clusters. - -* `ROOK_HOSTPATH_REQUIRES_PRIVILEGED`: Must be set to `true`. Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux, the pod must be running privileged in order to write to the hostPath volume. 
- -```yaml -- name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED - value: "true" -``` - -Now create the security context constraints and the operator: - -```console -oc create -f operator-openshift.yaml -``` - -### Cluster Settings - -The cluster settings in `cluster.yaml` are largely isolated from the differences in OpenShift. There is perhaps just one to take note of: - -* `dataDirHostPath`: Ensure that it points to a valid, writable path on the host systems. - -### Object Store Settings - -In OpenShift, ports less than 1024 cannot be bound. In the [object store CRD](ceph-object.md), ensure the port is modified to meet this requirement. - -```yaml -gateway: - port: 8080 -``` - -You can expose a different port such as `80` by creating a service. - -A sample object store can be created with these settings: - -```console -oc create -f object-openshift.yaml -``` diff --git a/Documentation/ceph-osd-mgmt.md b/Documentation/ceph-osd-mgmt.md deleted file mode 100644 index d12f7eb07..000000000 --- a/Documentation/ceph-osd-mgmt.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: OSD Management -weight: 11140 -indent: true ---- -{% include_relative branch.liquid %} - -# Ceph OSD Management - -Ceph Object Storage Daemons (OSDs) are the heart and soul of the Ceph storage platform. -Each OSD manages a local device and together they provide the distributed storage. Rook will automate creation and management of OSDs to hide the complexity -based on the desired state in the CephCluster CR as much as possible. This guide will walk through some of the scenarios -to configure OSDs where more configuration may be required. - -## OSD Health - -The [rook-ceph-tools pod](./ceph-toolbox.md) provides a simple environment to run Ceph tools. The `ceph` commands -mentioned in this document should be run from the toolbox. - -Once the toolbox pod is created, connect to it to execute the `ceph` commands to analyze the health of the cluster, -in particular the OSDs and placement groups (PGs). Some common commands to analyze OSDs include: -``` -ceph status -ceph osd tree -ceph osd status -ceph osd df -ceph osd utilization -``` - -```console -kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') bash -``` - -## Add an OSD - -The [QuickStart Guide](ceph-quickstart.md) provides the basic steps to create a cluster and start some OSDs. For more details on the OSD -settings, also see the [Cluster CRD](ceph-cluster-crd.md) documentation. If you are not seeing OSDs created, see the [Ceph Troubleshooting Guide](ceph-common-issues.md). - -To add more OSDs, Rook will automatically watch for new nodes and devices being added to your cluster. -If they match the filters or other settings in the `storage` section of the cluster CR, the operator -will create new OSDs. - -## Add an OSD on a PVC - -In more dynamic environments where storage can be dynamically provisioned with a raw block storage provider, the OSDs can be backed -by PVCs. See the `storageClassDeviceSets` documentation in the [Cluster CRD](ceph-cluster-crd.md#storage-class-device-sets) topic. - -To add more OSDs, you can either increase the `count` of the OSDs in an existing device set or you can -add more device sets to the cluster CR, as sketched below. The operator will then automatically create new OSDs according -to the updated cluster CR.
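-
-As an illustration only (the device set name, size, and storage class below are placeholders rather than values from this guide), raising `count` is the single field change needed in an existing device set:
-
-```yaml
-spec:
-  storage:
-    storageClassDeviceSets:
-      - name: set1                  # hypothetical device set name
-        count: 4                    # e.g. raised from 3 to add one more OSD
-        volumeClaimTemplates:
-          - metadata:
-              name: data
-            spec:
-              accessModes:
-                - ReadWriteOnce
-              resources:
-                requests:
-                  storage: 10Gi
-              storageClassName: gp2 # any raw-block-capable storage class
-              volumeMode: Block
-```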
- -## Remove an OSD - -To remove an OSD due to a failed disk or other re-configuration, consider the following to ensure the health of the data -through the removal process: -- Confirm you will have enough space on your cluster after removing your OSDs to properly handle the deletion -- Confirm the remaining OSDs and their placement groups (PGs) are healthy in order to handle the rebalancing of the data -- Do not remove too many OSDs at once -- Wait for rebalancing between removing multiple OSDs - -If all the PGs are `active+clean` and there are no warnings about being low on space, this means the data is fully replicated -and it is safe to proceed. If an OSD is failing, the PGs will not be perfectly clean and you will need to proceed anyway. - -### Host-based cluster - -Update your CephCluster CR. Depending on your CR settings, you may need to remove the device from the list or update the device filter. -If you are using `useAllDevices: true`, no change to the CR is necessary. - -**IMPORTANT: On host-based clusters, you may need to stop the Rook Operator while performing OSD -removal steps in order to prevent Rook from detecting the old OSD and trying to re-create it before -the disk is wiped or removed.** - -To stop the Rook Operator, run -`kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=0`. - -You must perform steps below to (1) purge the OSD and either (2.a) delete the underlying data or -(2.b)replace the disk before starting the Rook Operator again. - -Once you have done that, you can start the Rook operator again with -`kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=1`. - -### PVC-based cluster - -To reduce the storage in your cluster or remove a failed OSD on a PVC: - -1. Shrink the number of OSDs in the `storageClassDeviceSet` in the CephCluster CR. If you have multiple device sets, - you may need to change the index of `0` in this example path. - - `kubectl -n rook-ceph patch CephCluster rook-ceph --type=json -p '[{"op": "replace", "path": "/spec/storage/storageClassDeviceSets/0/count", "value":}]'` - - Reduce the `count` of the OSDs to the desired number. Rook will not take any action to automatically remove the extra OSD(s). -2. Identify the PVC that belongs to the OSD that is failed or otherwise being removed. - - `kubectl -n rook-ceph get pvc -l ceph.rook.io/DeviceSet=` -3. Identify the OSD you desire to remove. - - The OSD assigned to the PVC can be found in the labels on the PVC - - `kubectl -n rook-ceph get pod -l ceph.rook.io/pvc= -o yaml | grep ceph-osd-id` - - For example, this might return: `ceph-osd-id: "0"` - - Remember the OSD ID for purging the OSD below - -If you later increase the count in the device set, note that the operator will create PVCs with the highest index -that is not currently in use by existing OSD PVCs. - -### Confirm the OSD is down - -If you want to remove an unhealthy OSD, the osd pod may be in an error state such as `CrashLoopBackoff` or the `ceph` commands -in the toolbox may show which OSD is `down`. If you want to remove a healthy OSD, you should run -`kubectl -n rook-ceph scale deployment rook-ceph-osd- --replicas=0` and `ceph osd down osd.` from the toolbox. - -### Purge the OSD from the Ceph cluster - -OSD removal can be automated with the example found in the [rook-ceph-purge-osd job](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/osd-purge.yaml). -In the osd-purge.yaml, change the `` to the ID(s) of the OSDs you want to remove. - -1. 
Run the job: `kubectl create -f osd-purge.yaml` -2. When the job is completed, review the logs to ensure success: `kubectl -n rook-ceph logs -l app=rook-ceph-purge-osd` -3. When finished, you can delete the job: `kubectl delete -f osd-purge.yaml` - -If you want to remove OSDs by hand, continue with the following sections. However, we recommend you to use the above-mentioned job to avoid operation errors. - -### Purge the OSD manually - -If the OSD purge job fails or you need fine-grained control of the removal, here are the individual commands that can be run from the toolbox. - -1. Detach the OSD PVC from Rook - - `kubectl -n rook-ceph label pvc ceph.rook.io/DeviceSetPVCId-` -2. Mark the OSD as `out` if not already marked as such by Ceph. This signals Ceph to start moving (backfilling) the data that was on that OSD to another OSD. - - `ceph osd out osd.` (for example if the OSD ID is 23 this would be `ceph osd out osd.23`) -3. Wait for the data to finish backfilling to other OSDs. - - `ceph status` will indicate the backfilling is done when all of the PGs are `active+clean`. If desired, it's safe to remove the disk after that. -4. Remove the OSD from the Ceph cluster - - `ceph osd purge --yes-i-really-mean-it` -5. Verify the OSD is removed from the node in the CRUSH map - - `ceph osd tree` - -The operator can automatically remove OSD deployments that are considered "safe-to-destroy" by Ceph. -After the steps above, the OSD will be considered safe to remove since the data has all been moved -to other OSDs. But this will only be done automatically by the operator if you have this setting in the cluster CR: -``` -removeOSDsIfOutAndSafeToRemove: true -``` - -Otherwise, you will need to delete the deployment directly: - - `kubectl delete deployment -n rook-ceph rook-ceph-osd-` - -In PVC-based cluster, remove the orphaned PVC, if necessary. - -### Delete the underlying data - -If you want to clean the device where the OSD was running, see in the instructions to -wipe a disk on the [Cleaning up a Cluster](ceph-teardown.md#delete-the-data-on-hosts) topic. - -## Replace an OSD - -To replace a disk that has failed: - -1. Run the steps in the previous section to [Remove an OSD](#remove-an-osd). -2. Replace the physical device and verify the new device is attached. -3. Check if your cluster CR will find the new device. If you are using `useAllDevices: true` you can skip this step. -If your cluster CR lists individual devices or uses a device filter you may need to update the CR. -4. The operator ideally will automatically create the new OSD within a few minutes of adding the new device or updating the CR. -If you don't see a new OSD automatically created, restart the operator (by deleting the operator pod) to trigger the OSD creation. -5. Verify if the OSD is created on the node by running `ceph osd tree` from the toolbox. - -Note that the OSD might have a different ID than the previous OSD that was replaced. diff --git a/Documentation/ceph-pool-crd.md b/Documentation/ceph-pool-crd.md deleted file mode 100644 index 4ff9ae49c..000000000 --- a/Documentation/ceph-pool-crd.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -title: Block Pool CRD -weight: 2700 -indent: true ---- - -# Ceph Block Pool CRD - -Rook allows creation and customization of storage pools through the custom resource definitions (CRDs). The following settings are available for pools. 
- -## Samples - -### Replicated - -For optimal performance, while also adding redundancy, this sample will configure Ceph to make three full copies of the data on multiple nodes. - -> **NOTE**: This sample requires *at least 1 OSD per node*, with each OSD located on *3 different nodes*. - -Each OSD must be located on a different node, because the [`failureDomain`](ceph-pool-crd.md#spec) is set to `host` and the `replicated.size` is set to `3`. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - failureDomain: host - replicated: - size: 3 - deviceClass: hdd -``` - -#### Hybrid Storage Pools -Hybrid storage is a combination of two different storage tiers. For example, SSD and HDD. -This helps to improve the read performance of cluster by placing, say, 1st copy of data on the higher performance tier (SSD or NVME) and remaining replicated copies on lower cost tier (HDDs). - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - failureDomain: host - replicated: - size: 3 - hybridStorage: - primaryDeviceClass: ssd - secondaryDeviceClass: hdd -``` -> **IMPORTANT**: The device classes `primaryDeviceClass` and `secondaryDeviceClass` must have at least one OSD associated with them or else the pool creation will fail. - -### Erasure Coded - -This sample will lower the overall storage capacity requirement, while also adding redundancy by using [erasure coding](#erasure-coding). - -> **NOTE**: This sample requires *at least 3 bluestore OSDs*. - -The OSDs can be located on a single Ceph node or spread across multiple nodes, because the [`failureDomain`](ceph-pool-crd.md#spec) is set to `osd` and the `erasureCoded` chunk settings require at least 3 different OSDs (2 `dataChunks` + 1 `codingChunks`). - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: ecpool - namespace: rook-ceph -spec: - failureDomain: osd - erasureCoded: - dataChunks: 2 - codingChunks: 1 - deviceClass: hdd -``` - -High performance applications typically will not use erasure coding due to the performance overhead of creating and distributing the chunks in the cluster. - -When creating an erasure-coded pool, it is highly recommended to create the pool when you have **bluestore OSDs** in your cluster -(see the [OSD configuration settings](ceph-cluster-crd.md#osd-configuration-settings). Filestore OSDs have -[limitations](http://docs.ceph.com/docs/master/rados/operations/erasure-code/#erasure-coding-with-overwrites) that are unsafe and lower performance. - -### Mirroring - -RADOS Block Device (RBD) mirroring is a process of asynchronous replication of Ceph block device images between two or more Ceph clusters. -Mirroring ensures point-in-time consistent replicas of all changes to an image, including reads and writes, block device resizing, snapshots, clones and flattening. -It is generally useful when planning for Disaster Recovery. -Mirroring is for clusters that are geographically distributed and stretching a single cluster is not possible due to high latencies. 
- -The following will enable mirroring of the pool at the image level: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - replicated: - size: 3 - mirroring: - enabled: true - mode: image - # schedule(s) of snapshot - snapshotSchedules: - - interval: 24h # daily snapshots - startTime: 14:00:00-05:00 -``` - -Once mirroring is enabled, Rook will by default create its own [bootstrap peer token](https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#bootstrap-peers) so that it can be used by another cluster. -The bootstrap peer token can be found in a Kubernetes Secret. The name of the Secret is present in the Status field of the CephBlockPool CR: - -```yaml -status: - info: - rbdMirrorBootstrapPeerSecretName: pool-peer-token-replicapool -``` - -This secret can then be fetched like so: - -```console -kubectl get secret -n rook-ceph pool-peer-token-replicapool -o jsonpath='{.data.token}'|base64 -d -``` ->``` ->eyJmc2lkIjoiOTFlYWUwZGQtMDZiMS00ZDJjLTkxZjMtMTMxMWM5ZGYzODJiIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFEN1psOWZ3V1VGRHhBQWdmY0gyZi8xeUhYeGZDUTU5L1N0NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjEwLjEwMS4xOC4yMjM6MzMwMCx2MToxMC4xMDEuMTguMjIzOjY3ODldIn0= ->``` - -The secret must be decoded. The result will be another base64 encoded blob that you will import in the destination cluster: - -```console -external-cluster-console # rbd mirror pool peer bootstrap import -``` - -See the official rbd mirror documentation on [how to add a bootstrap peer](https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#bootstrap-peers). - -### Data spread across subdomains - -Imagine the following topology with datacenters containing racks and then hosts: - -```text -. -├── datacenter-1 -│ ├── rack-1 -│ │ ├── host-1 -│ │ ├── host-2 -│ └── rack-2 -│ ├── host-3 -│ ├── host-4 -└── datacenter-2 - ├── rack-3 - │ ├── host-5 - │ ├── host-6 - └── rack-4 - ├── host-7 - └── host-8 -``` - -As an administrator I would like to place 4 copies across both datacenter where each copy inside a datacenter is across a rack. -This can be achieved by the following: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - replicated: - size: 4 - replicasPerFailureDomain: 2 - subFailureDomain: rack -``` - -## Pool Settings - -### Metadata - -* `name`: The name of the pool to create. -* `namespace`: The namespace of the Rook cluster where the pool is created. - -### Spec - -* `replicated`: Settings for a replicated pool. If specified, `erasureCoded` settings must not be specified. - * `size`: The desired number of copies to make of the data in the pool. - * `requireSafeReplicaSize`: set to false if you want to create a pool with size 1, setting pool size 1 could lead to data loss without recovery. Make sure you are *ABSOLUTELY CERTAIN* that is what you want. - * `replicasPerFailureDomain`: Sets up the number of replicas to place in a given failure domain. For instance, if the failure domain is a datacenter (cluster is -stretched) then you will have 2 replicas per datacenter where each replica ends up on a different host. This gives you a total of 4 replicas and for this, the `size` must be set to 4. The default is 1. - * `subFailureDomain`: Name of the CRUSH bucket representing a sub-failure domain. In a stretched configuration this option represent the "last" bucket where replicas will end up being written. 
Imagine the cluster is stretched across two datacenters, you can then have 2 copies per datacenter and each copy on a different CRUSH bucket. The default is "host". -* `erasureCoded`: Settings for an erasure-coded pool. If specified, `replicated` settings must not be specified. See below for more details on [erasure coding](#erasure-coding). - * `dataChunks`: Number of chunks to divide the original object into - * `codingChunks`: Number of coding chunks to generate -* `failureDomain`: The failure domain across which the data will be spread. This can be set to a value of either `osd` or `host`, with `host` being the default setting. A failure domain can also be set to a different type (e.g. `rack`), if it is added as a `location` in the [Storage Selection Settings](ceph-cluster-crd.md#storage-selection-settings). - If a `replicated` pool of size `3` is configured and the `failureDomain` is set to `host`, all three copies of the replicated data will be placed on OSDs located on `3` different Ceph hosts. This case is guaranteed to tolerate a failure of two hosts without a loss of data. Similarly, a failure domain set to `osd`, can tolerate a loss of two OSD devices. - - If erasure coding is used, the data and coding chunks are spread across the configured failure domain. - - > **NOTE**: Neither Rook, nor Ceph, prevent the creation of a cluster where the replicated data (or Erasure Coded chunks) can be written safely. By design, Ceph will delay checking for suitable OSDs until a write request is made and this write can hang if there are not sufficient OSDs to satisfy the request. -* `deviceClass`: Sets up the CRUSH rule for the pool to distribute data only on the specified device class. If left empty or unspecified, the pool will use the cluster's default CRUSH root, which usually distributes data over all OSDs, regardless of their class. -* `crushRoot`: The root in the crush map to be used by the pool. If left empty or unspecified, the default root will be used. Creating a crush hierarchy for the OSDs currently requires the Rook toolbox to run the Ceph tools described [here](http://docs.ceph.com/docs/master/rados/operations/crush-map/#modifying-the-crush-map). -* `enableRBDStats`: Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false. For more info see the [ceph documentation](https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics). - -* `parameters`: Sets any [parameters](https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values) listed to the given pool - * `target_size_ratio:` gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool, for more info see the [ceph documentation](https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size) - * `compression_mode`: Sets up the pool for inline compression when using a Bluestore OSD. If left unspecified does not setup any compression mode for the pool. Values supported are the same as Bluestore inline compression [modes](https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression), such as `none`, `passive`, `aggressive`, and `force`. - -* `mirroring`: Sets up mirroring of the pool - * `enabled`: whether mirroring is enabled on that pool (default: false) - * `mode`: mirroring mode to run, possible values are "pool" or "image" (required). 
Refer to the [mirroring modes Ceph documentation](https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#enable-mirroring) for more details. - * `snapshotSchedules`: schedule(s) snapshot at the **pool** level. **Only** supported as of Ceph Octopus release. One or more schedules are supported. - * `interval`: frequency of the snapshots. The interval can be specified in days, hours, or minutes using d, h, m suffix respectively. - * `startTime`: optional, determines at what time the snapshot process starts, specified using the ISO 8601 time format. - * `peers`: to configure mirroring peers - * `secretNames`: a list of peers to connect to. Currently (Ceph Octopus release) **only a single** peer is supported where a peer represents a Ceph cluster. - -* `statusCheck`: Sets up pool mirroring status - * `mirror`: displays the mirroring status - * `disabled`: whether to enable or disable pool mirroring status - * `interval`: time interval to refresh the mirroring status (default 60s) - -* `quotas`: Set byte and object quotas. See the [ceph documentation](https://docs.ceph.com/en/latest/rados/operations/pools/#set-pool-quotas) for more info. - * `maxSize`: quota in bytes as a string with quantity suffixes (e.g. "10Gi") - * `maxObjects`: quota in objects as an integer - > **NOTE**: A value of 0 disables the quota. - -### Add specific pool properties - -With `poolProperties` you can set any pool property: - -```yaml -spec: - parameters: - : -``` - -For instance: - -```yaml -spec: - parameters: - min_size: 1 -``` - -### Erasure Coding - -[Erasure coding](http://docs.ceph.com/docs/master/rados/operations/erasure-code/) allows you to keep your data safe while reducing the storage overhead. Instead of creating multiple replicas of the data, -erasure coding divides the original data into chunks of equal size, then generates extra chunks of that same size for redundancy. - -For example, if you have an object of size 2MB, the simplest erasure coding with two data chunks would divide the object into two chunks of size 1MB each (data chunks). One more chunk (coding chunk) of size 1MB will be generated. In total, 3MB will be stored in the cluster. The object will be able to suffer the loss of any one of the chunks and still be able to reconstruct the original object. - -The number of data and coding chunks you choose will depend on your resiliency to loss and how much storage overhead is acceptable in your storage cluster. -Here are some examples to illustrate how the number of chunks affects the storage and loss toleration. - -| Data chunks (k) | Coding chunks (m) | Total storage | Losses Tolerated | OSDs required | -| --------------- | ----------------- | ------------- | ---------------- | ------------- | -| 2 | 1 | 1.5x | 1 | 3 | -| 2 | 2 | 2x | 2 | 4 | -| 4 | 2 | 1.5x | 2 | 6 | -| 16 | 4 | 1.25x | 4 | 20 | - -The `failureDomain` must be also be taken into account when determining the number of chunks. The failure domain determines the level in the Ceph CRUSH hierarchy where the chunks must be uniquely distributed. This decision will impact whether node losses or disk losses are tolerated. There could also be performance differences of placing the data across nodes or osds. - -* `host`: All chunks will be placed on unique hosts -* `osd`: All chunks will be placed on unique OSDs - -If you do not have a sufficient number of hosts or OSDs for unique placement the pool can be created, writing to the pool will hang. - -Rook currently only configures two levels in the CRUSH map. 
It is also possible to configure other levels such as `rack` with by adding [topology labels](ceph-cluster-crd.md#osd-topology) to the nodes. diff --git a/Documentation/ceph-prerequisites.md b/Documentation/ceph-prerequisites.md deleted file mode 100644 index 5e9819aa6..000000000 --- a/Documentation/ceph-prerequisites.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Prerequisites -weight: 2010 -indent: true ---- - -# Ceph Prerequisites - -To make sure you have a Kubernetes cluster that is ready for `Rook`, review the general [Rook Prerequisites](k8s-pre-reqs.md). - -In order to configure the Ceph storage cluster, at least one of these local storage options are required: -- Raw devices (no partitions or formatted filesystems) -- Raw partitions (no formatted filesystem) -- PVs available from a storage class in `block` mode - -## LVM package - -Ceph OSDs have a dependency on LVM in the following scenarios: -- OSDs are created on raw devices or partitions -- If encryption is enabled (`encryptedDevice: true` in the cluster CR) -- A `metadata` device is specified - -LVM is not required for OSDs in these scenarios: -- Creating OSDs on PVCs using the `storageClassDeviceSets` - -If LVM is required for your scenario, LVM needs to be available on the hosts where OSDs will be running. -Some Linux distributions do not ship with the `lvm2` package. This package is required on all storage nodes in your k8s cluster to run Ceph OSDs. -Without this package even though Rook will be able to successfully create the Ceph OSDs, when a node is rebooted the OSD pods -running on the restarted node will **fail to start**. Please install LVM using your Linux distribution's package manager. For example: - -CentOS: - -```console -sudo yum install -y lvm2 -``` - -Ubuntu: - -```console -sudo apt-get install -y lvm2 -``` - -RancherOS: - -- Since version [1.5.0](https://github.com/rancher/os/issues/2551) LVM is supported -- Logical volumes [will not be activated](https://github.com/rook/rook/issues/5027) during the boot process. You need to add an [runcmd command](https://rancher.com/docs/os/v1.x/en/installation/configuration/running-commands/) for that. - -```yaml -runcmd: -- [ vgchange, -ay ] -``` - -## Ceph Flexvolume Configuration - -**NOTE** This configuration is only needed when using the FlexVolume driver (required for Kubernetes 1.12 or earlier). The Ceph-CSI RBD driver or the Ceph-CSI CephFS driver are recommended for Kubernetes 1.13 and newer, making FlexVolume configuration redundant. - -If you want to configure volumes with the Flex driver instead of CSI, the Rook agent requires setup as a Flex volume plugin to manage the storage attachments in your cluster. -See the [Flex Volume Configuration](flexvolume.md) topic to configure your Kubernetes deployment to load the Rook volume plugin. - -### Extra agent mounts - -On certain distributions it may be necessary to mount additional directories into the agent container. That is what the environment variable `AGENT_MOUNTS` is for. Also see the documentation in [helm-operator](helm-operator.md) on the parameter `agent.mounts`. The format of the variable content should be `mountname1=/host/path1:/container/path1,mountname2=/host/path2:/container/path2`. - -## Kernel - -### RBD - -Ceph requires a Linux kernel built with the RBD module. Many Linux distributions have this module, but not all distributions. -For example, the GKE Container-Optimised OS (COS) does not have RBD. - -You can test your Kubernetes nodes by running `modprobe rbd`. 
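-For example (a quick check only; run as root or with `sudo` on each storage node):
-
-```console
-# No output means the rbd module is available and is now loaded
-sudo modprobe rbd
-```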
-If it says 'not found', you may have to [rebuild your kernel](https://rook.io/docs/rook/master/common-issues.html#rook-agent-rbd-module-missing-error) -or choose a different Linux distribution. - -### CephFS - -If you will be creating volumes from a Ceph shared file system (CephFS), the recommended minimum kernel version is **4.17**. -If you have a kernel version less than 4.17, the requested PVC sizes will not be enforced. Storage quotas will only be -enforced on newer kernels. - -## Kernel modules directory configuration - -Normally, on Linux, kernel modules can be found in `/lib/modules`. However, there are some distributions that put them elsewhere. In that case the environment variable `LIB_MODULES_DIR_PATH` can be used to override the default. Also see the documentation in [helm-operator](helm-operator.md) on the parameter `agent.libModulesDirPath`. One notable distribution where this setting is useful would be [NixOS](https://nixos.org). diff --git a/Documentation/ceph-quickstart.md b/Documentation/ceph-quickstart.md deleted file mode 100644 index 79005807a..000000000 --- a/Documentation/ceph-quickstart.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: Ceph Storage -weight: 300 -indent: true ---- - -{% include_relative branch.liquid %} - - -# Ceph Storage Quickstart - -This guide will walk you through the basic setup of a Ceph cluster and enable you to consume block, object, and file storage -from other pods running in your cluster. - -## Minimum Version - -Kubernetes **v1.11** or higher is supported by Rook. - -**Important** If you are using K8s 1.15 or older, you will need to create a different version of the Rook CRDs. Create the `crds.yaml` found in the [pre-k8s-1.16](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/pre-k8s-1.16) subfolder of the example manifests. - -## Prerequisites - -To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [follow these instructions](k8s-pre-reqs.md). - -In order to configure the Ceph storage cluster, at least one of these local storage options are required: -- Raw devices (no partitions or formatted filesystems) - - This requires `lvm2` to be installed on the host. - To avoid this dependency, you can create a single full-disk partition on the disk (see below) -- Raw partitions (no formatted filesystem) -- Persistent Volumes available from a storage class in `block` mode - -You can confirm whether your partitions or devices are formatted filesystems with the following command. - -```console -lsblk -f -``` ->``` ->NAME FSTYPE LABEL UUID MOUNTPOINT ->vda ->└─vda1 LVM2_member >eSO50t-GkUV-YKTH-WsGq-hNJY-eKNf-3i07IB -> ├─ubuntu--vg-root ext4 c2366f76-6e21-4f10-a8f3-6776212e2fe4 / -> └─ubuntu--vg-swap_1 swap 9492a3dc-ad75-47cd-9596-678e8cf17ff9 [SWAP] ->vdb ->``` - -If the `FSTYPE` field is not empty, there is a filesystem on top of the corresponding device. In this case, you can use vdb for Ceph and can't use vda and its partitions. - -## TL;DR - -If you're feeling lucky, a simple Rook cluster can be created with the following kubectl commands and [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph). For the more detailed install, skip to the next section to [deploy the Rook operator](#deploy-the-rook-operator). 
- -```console -$ git clone --single-branch --branch v1.7.2 https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph -kubectl create -f crds.yaml -f common.yaml -f operator.yaml -kubectl create -f cluster.yaml -``` - -After the cluster is running, you can create [block, object, or file](#storage) storage to be consumed by other applications in your cluster. - -### Cluster Environments - -The Rook documentation is focused around starting Rook in a production environment. Examples are also -provided to relax some settings for test environments. When creating the cluster later in this guide, consider these example cluster manifests: -- [cluster.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml): Cluster settings for a production cluster running on bare metal. Requires at least three worker nodes. -- [cluster-on-pvc.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml): Cluster settings for a production cluster running in a dynamic cloud environment. -- [cluster-test.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster-test.yaml): Cluster settings for a test environment such as minikube. - -See the [Ceph examples](ceph-examples.md) for more details. - -## Deploy the Rook Operator - -The first step is to deploy the Rook operator. Check that you are using the [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph) that correspond to your release of Rook. For more options, see the [examples documentation](ceph-examples.md). - -```console -cd cluster/examples/kubernetes/ceph -kubectl create -f crds.yaml -f common.yaml -f operator.yaml - -# verify the rook-ceph-operator is in the `Running` state before proceeding -kubectl -n rook-ceph get pod -``` - -You can also deploy the operator with the [Rook Helm Chart](helm-operator.md). - -Before you start the operator in production, there are some settings that you may want to consider: -1. If you are using kubernetes v1.15 or older you need to create CRDs found here `/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crd.yaml`. - The apiextension v1beta1 version of CustomResourceDefinition was deprecated in Kubernetes v1.16. -2. Consider if you want to enable certain Rook features that are disabled by default. See the [operator.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator.yaml) for these and other advanced settings. - 1. Device discovery: Rook will watch for new devices to configure if the `ROOK_ENABLE_DISCOVERY_DAEMON` setting is enabled, commonly used in bare metal clusters. - 2. Flex driver: The flex driver is deprecated in favor of the CSI driver, but can still be enabled with the `ROOK_ENABLE_FLEX_DRIVER` setting. - 3. Node affinity and tolerations: The CSI driver by default will run on any node in the cluster. To configure the CSI driver affinity, several settings are available. - -If you wish to deploy into a namespace other than the default `rook-ceph`, see the -[Ceph advanced configuration section](ceph-advanced-configuration.md#using-alternate-namespaces) on the topic. - -## Create a Rook Ceph Cluster - -Now that the Rook operator is running we can create the Ceph cluster. For the cluster to survive reboots, -make sure you set the `dataDirHostPath` property that is valid for your hosts. For more settings, see the documentation on [configuring the cluster](ceph-cluster-crd.md). 
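-
-For reference, a minimal sketch of the relevant `cluster.yaml` fragment; `/var/lib/rook` is the commonly used default, and whatever path you choose must be persistent and writable on every host:
-
-```yaml
-apiVersion: ceph.rook.io/v1
-kind: CephCluster
-metadata:
-  name: rook-ceph
-  namespace: rook-ceph
-spec:
-  dataDirHostPath: /var/lib/rook
-```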
- -Create the cluster: - -```console -kubectl create -f cluster.yaml -``` - -Use `kubectl` to list pods in the `rook-ceph` namespace. You should be able to see the following pods once they are all running. -The number of osd pods will depend on the number of nodes in the cluster and the number of devices configured. -If you did not modify the `cluster.yaml` above, it is expected that one OSD will be created per node. -The CSI, `rook-ceph-agent` (flex driver), and `rook-discover` pods are also optional depending on your settings. - -> If the `rook-ceph-mon`, `rook-ceph-mgr`, or `rook-ceph-osd` pods are not created, please refer to the -> [Ceph common issues](ceph-common-issues.md) for more details and potential solutions. - -```console -kubectl -n rook-ceph get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->csi-cephfsplugin-provisioner-d77bb49c6-n5tgs 5/5 Running 0 140s ->csi-cephfsplugin-provisioner-d77bb49c6-v9rvn 5/5 Running 0 140s ->csi-cephfsplugin-rthrp 3/3 Running 0 140s ->csi-rbdplugin-hbsm7 3/3 Running 0 140s ->csi-rbdplugin-provisioner-5b5cd64fd-nvk6c 6/6 Running 0 140s ->csi-rbdplugin-provisioner-5b5cd64fd-q7bxl 6/6 Running 0 140s ->rook-ceph-crashcollector-minikube-5b57b7c5d4-hfldl 1/1 Running 0 105s ->rook-ceph-mgr-a-64cd7cdf54-j8b5p 1/1 Running 0 77s ->rook-ceph-mon-a-694bb7987d-fp9w7 1/1 Running 0 105s ->rook-ceph-mon-b-856fdd5cb9-5h2qk 1/1 Running 0 94s ->rook-ceph-mon-c-57545897fc-j576h 1/1 Running 0 85s ->rook-ceph-operator-85f5b946bd-s8grz 1/1 Running 0 92m ->rook-ceph-osd-0-6bb747b6c5-lnvb6 1/1 Running 0 23s ->rook-ceph-osd-1-7f67f9646d-44p7v 1/1 Running 0 24s ->rook-ceph-osd-2-6cd4b776ff-v4d68 1/1 Running 0 25s ->rook-ceph-osd-prepare-node1-vx2rz 0/2 Completed 0 60s ->rook-ceph-osd-prepare-node2-ab3fd 0/2 Completed 0 60s ->rook-ceph-osd-prepare-node3-w4xyz 0/2 Completed 0 60s ->``` - -To verify that the cluster is in a healthy state, connect to the [Rook toolbox](ceph-toolbox.md) and run the -`ceph status` command. - -* All mons should be in quorum -* A mgr should be active -* At least one OSD should be active -* If the health is not `HEALTH_OK`, the warnings or errors should be investigated - -```console -ceph status -``` ->``` -> cluster: -> id: a0452c76-30d9-4c1a-a948-5d8405f19a7c -> health: HEALTH_OK -> -> services: -> mon: 3 daemons, quorum a,b,c (age 3m) -> mgr: a(active, since 2m) -> osd: 3 osds: 3 up (since 1m), 3 in (since 1m) ->... ->``` - -If the cluster is not healthy, please refer to the [Ceph common issues](ceph-common-issues.md) for more details and potential solutions. - -## Storage - -For a walkthrough of the three types of storage exposed by Rook, see the guides for: - -* **[Block](ceph-block.md)**: Create block storage to be consumed by a pod -* **[Object](ceph-object.md)**: Create an object store that is accessible inside or outside the Kubernetes cluster -* **[Shared Filesystem](ceph-filesystem.md)**: Create a filesystem to be shared across multiple pods - -## Ceph Dashboard - -Ceph has a dashboard in which you can view the status of your cluster. Please see the [dashboard guide](ceph-dashboard.md) for more details. - -## Tools - -We have created a toolbox container that contains the full suite of Ceph clients for debugging and troubleshooting your Rook cluster. Please see the [toolbox readme](ceph-toolbox.md) for setup and usage information. Also see our [advanced configuration](ceph-advanced-configuration.md) document for helpful maintenance and tuning examples. 
- -## Monitoring - -Each Rook cluster has some built in metrics collectors/exporters for monitoring with [Prometheus](https://prometheus.io/). -To learn how to set up monitoring for your Rook cluster, you can follow the steps in the [monitoring guide](./ceph-monitoring.md). - -## Teardown - -When you are done with the test cluster, see [these instructions](ceph-teardown.md) to clean up the cluster. diff --git a/Documentation/ceph-rbd-mirror-crd.md b/Documentation/ceph-rbd-mirror-crd.md deleted file mode 100644 index 1820f6bb5..000000000 --- a/Documentation/ceph-rbd-mirror-crd.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: RBDMirror CRD -weight: 3500 -indent: true ---- -{% include_relative branch.liquid %} - -# Ceph RBDMirror CRD - -Rook allows creation and updating rbd-mirror daemon(s) through the custom resource definitions (CRDs). -RBD images can be asynchronously mirrored between two Ceph clusters. -For more information about user management and capabilities see the [Ceph docs](https://docs.ceph.com/docs/master/rbd/rbd-mirroring/). - -## Creating daemons - -To get you started, here is a simple example of a CRD to deploy an rbd-mirror daemon. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephRBDMirror -metadata: - name: my-rbd-mirror - namespace: rook-ceph -spec: - count: 1 -``` - -### Prerequisites - -This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](ceph-quickstart.md) - -## Settings - -If any setting is unspecified, a suitable default will be used automatically. - -### RBDMirror metadata - -* `name`: The name that will be used for the Ceph RBD Mirror daemon. -* `namespace`: The Kubernetes namespace that will be created for the Rook cluster. The services, pods, and other resources created by the operator will be added to this namespace. - -### RBDMirror Settings - -* `count`: The number of rbd mirror instance to run. -* `placement`: The rbd mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). -* `annotations`: Key value pair list of annotations to add. -* `labels`: Key value pair list of labels to add. -* `resources`: The resource requirements for the rbd mirror pods. -* `priorityClassName`: The priority class to set on the rbd mirror pods. - -### Configuring mirroring peers - -On an external site you want to mirror with, you need to create a bootstrap peer token. -The token will be used by one site to **pull** images from the other site. -The following assumes the name of the pool is "test" and the site name "europe" (just like the region), so we will be pulling images from this site: - -```console -external-cluster-console # rbd mirror pool peer bootstrap create test --site-name europe -``` - -For more details, refer to the official rbd mirror documentation on [how to create a bootstrap peer](https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#bootstrap-peers). - -When the peer token is available, you need to create a Kubernetes Secret. 
-Our `europe-cluster-peer-pool-test-1` will have to be created manually, like so: - -```console -$ kubectl -n rook-ceph create secret generic "europe-cluster-peer-pool-test-1" \ ---from-literal=token=eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ== \ ---from-literal=pool=test -``` - -Rook will read both `token` and `pool` keys of the Data content of the Secret. -Rook also accepts the `destination` key, which specifies the mirroring direction. -It defaults to rx-tx for bidirectional mirroring, but can also be set to rx-only for unidirectional mirroring. - -You can now inject the rbdmirror CR: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephRBDMirror -metadata: - name: my-rbd-mirror - namespace: rook-ceph -spec: - count: 1 - peers: - secretNames: - - "europe-cluster-peer-pool-test-1" -``` - -You can add more pools, for this just repeat the above and change the "pool" value of the Kubernetes Secret. -So the list might eventually look like: - -```yaml - peers: - secretNames: - - "europe-cluster-peer-pool-test-1" - - "europe-cluster-peer-pool-test-2" - - "europe-cluster-peer-pool-test-3" -``` - -Along with three Kubernetes Secret. diff --git a/Documentation/ceph-storage.md b/Documentation/ceph-storage.md deleted file mode 100644 index 28dc963f1..000000000 --- a/Documentation/ceph-storage.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Ceph Storage -weight: 2000 ---- - -# Ceph Storage - -Ceph is a highly scalable distributed storage solution for **block storage**, **object storage**, and **shared filesystems** with years of production deployments. - -## Design - -Rook enables Ceph storage systems to run on Kubernetes using Kubernetes primitives. The following image illustrates how Ceph Rook integrates with Kubernetes: - -![Rook Architecture on Kubernetes](media/rook-architecture.png) -With Ceph running in the Kubernetes cluster, Kubernetes applications can -mount block devices and filesystems managed by Rook, or can use the S3/Swift API for object storage. The Rook operator -automates configuration of storage components and monitors the cluster to ensure the storage remains available -and healthy. - -The Rook operator is a simple container that has all that is needed to bootstrap -and monitor the storage cluster. The operator will start and monitor [Ceph monitor pods](ceph-mon-health.md), the Ceph OSD daemons to provide RADOS storage, as well as start and manage other Ceph daemons. The operator manages CRDs for pools, object stores (S3/Swift), and filesystems by initializing the pods and other artifacts necessary to run the services. - -The operator will monitor the storage daemons to ensure the cluster is healthy. Ceph mons will be started or failed over when necessary, and -other adjustments are made as the cluster grows or shrinks. The operator will also watch for desired state changes -requested by the api service and apply the changes. - -The Rook operator also initializes the agents that are needed for consuming the storage. Rook automatically configures the Ceph-CSI driver to mount the storage to your pods. Rook's flex driver is also available, though it is not enabled by default and will soon be deprecated in favor of the CSI driver. 
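To make that consumption model concrete, below is a minimal sketch of how an application could request Rook-provisioned block storage through the CSI driver; the `rook-ceph-block` StorageClass is assumed to have been created as in the block storage walkthrough, and the claim name is illustrative:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # illustrative name for an application volume
  name: example-app-data
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  # StorageClass created by the block storage walkthrough
  storageClassName: rook-ceph-block
```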
- -![Rook Components on Kubernetes](media/kubernetes.png) - -The `rook/ceph` image includes all necessary tools to manage the cluster -- there is no change to the data path. -Rook does not attempt to maintain full fidelity with Ceph. Many of the Ceph concepts like placement groups and crush maps -are hidden so you don't have to worry about them. Instead Rook creates a much simplified user experience for admins that is in terms -of physical resources, pools, volumes, filesystems, and buckets. At the same time, advanced configuration can be applied when needed with the Ceph tools. - -Rook is implemented in golang. Ceph is implemented in C++ where the data path is highly optimized. We believe -this combination offers the best of both worlds. diff --git a/Documentation/ceph-teardown.md b/Documentation/ceph-teardown.md deleted file mode 100644 index 58e0ac02f..000000000 --- a/Documentation/ceph-teardown.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Cleanup -weight: 3900 -indent: true ---- - -# Cleaning up a Cluster - -If you want to tear down the cluster and bring up a new one, be aware of the following resources that will need to be cleaned up: - -* `rook-ceph` namespace: The Rook operator and cluster created by `operator.yaml` and `cluster.yaml` (the cluster CRD) -* `/var/lib/rook`: Path on each host in the cluster where configuration is cached by the ceph mons and osds - -Note that if you changed the default namespaces or paths such as `dataDirHostPath` in the sample yaml files, you will need to adjust these namespaces and paths throughout these instructions. - -If you see issues tearing down the cluster, see the [Troubleshooting](#troubleshooting) section below. - -If you are tearing down a cluster frequently for development purposes, it is instead recommended to use an environment such as Minikube that can easily be reset without worrying about any of these steps. - -## Delete the Block and File artifacts - -First you will need to clean up the resources created on top of the Rook cluster. - -These commands will clean up the resources from the [block](ceph-block.md#teardown) and [file](ceph-filesystem.md#teardown) walkthroughs (unmount volumes, delete volume claims, etc). If you did not complete those parts of the walkthrough, you can skip these instructions: - -```console -kubectl delete -f ../wordpress.yaml -kubectl delete -f ../mysql.yaml -kubectl delete -n rook-ceph cephblockpool replicapool -kubectl delete storageclass rook-ceph-block -kubectl delete -f csi/cephfs/kube-registry.yaml -kubectl delete storageclass csi-cephfs -``` - -After those block and file resources have been cleaned up, you can then delete your Rook cluster. This is important to delete **before removing the Rook operator and agent or else resources may not be cleaned up properly**. - -## Delete the CephCluster CRD - -Edit the `CephCluster` and add the `cleanupPolicy` - -WARNING: DATA WILL BE PERMANENTLY DELETED AFTER DELETING THE `CephCluster` CR WITH `cleanupPolicy`. - -```console -kubectl -n rook-ceph patch cephcluster rook-ceph --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}' -``` - -Once the cleanup policy is enabled, any new configuration changes in the CephCluster will be blocked. Nothing will happen until the deletion of the CR is requested, so this `cleanupPolicy` change can still be reverted if needed. - -Checkout more details about the `cleanupPolicy` [here](ceph-cluster-crd.md#cleanup-policy) - -Delete the `CephCluster` CR. 
- -```console -kubectl -n rook-ceph delete cephcluster rook-ceph -``` - -Verify that the cluster CR has been deleted before continuing to the next step. - -```console -kubectl -n rook-ceph get cephcluster -``` - -If the `cleanupPolicy` was applied, then wait for the `rook-ceph-cleanup` jobs to be completed on all the nodes. -These jobs will perform the following operations: -- Delete the directory `/var/lib/rook` (or the path specified by the `dataDirHostPath`) on all the nodes -- Wipe the data on the drives on all the nodes where OSDs were running in this cluster - -Note: The cleanup jobs might not start if the resources created on top of Rook Cluster are not deleted completely. [See](ceph-teardown.md#delete-the-block-and-file-artifacts) - -## Delete the Operator and related Resources - -This will begin the process of the Rook Ceph operator and all other resources being cleaned up. -This includes related resources such as the agent and discover daemonsets with the following commands: - -```console -kubectl delete -f operator.yaml -kubectl delete -f common.yaml -kubectl delete -f crds.yaml -``` - -If the `cleanupPolicy` was applied and the cleanup jobs have completed on all the nodes, then the cluster tear down has been successful. If you skipped adding the `cleanupPolicy` then follow the manual steps mentioned below to tear down the cluster. - -## Delete the data on hosts - -> **IMPORTANT**: The final cleanup step requires deleting files on each host in the cluster. All files under the `dataDirHostPath` property specified in the cluster CRD will need to be deleted. Otherwise, inconsistent state will remain when a new cluster is started. - -Connect to each machine and delete `/var/lib/rook`, or the path specified by the `dataDirHostPath`. - -In the future this step will not be necessary when we build on the K8s local storage feature. - -If you modified the demo settings, additional cleanup is up to you for devices, host paths, etc. - -### Zapping Devices - -Disks on nodes used by Rook for osds can be reset to a usable state with the following methods: - -```console -#!/usr/bin/env bash -DISK="/dev/sdb" - -# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean) - -# You will have to run this step for all disks. -sgdisk --zap-all $DISK - -# Clean hdds with dd -dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync - -# Clean disks such as ssd with blkdiscard instead of dd -blkdiscard $DISK - -# These steps only have to be run once on each node -# If rook sets up osds using ceph-volume, teardown leaves some devices mapped that lock the disks. -ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove % - -# ceph-volume setup can leave ceph- directories in /dev and /dev/mapper (unnecessary clutter) -rm -rf /dev/ceph-* -rm -rf /dev/mapper/ceph--* - -# Inform the OS of partition table changes -partprobe $DISK -``` - -## Troubleshooting - -If the cleanup instructions are not executed in the order above, or you otherwise have difficulty cleaning up the cluster, here are a few things to try. - -The most common issue cleaning up the cluster is that the `rook-ceph` namespace or the cluster CRD remain indefinitely in the `terminating` state. A namespace cannot be removed until all of its resources are removed, so look at which resources are pending termination. - -Look at the pods: - -```console -kubectl -n rook-ceph get pod -``` - -If a pod is still terminating, you will need to wait or else attempt to forcefully terminate it (`kubectl delete pod `). 
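A forced deletion looks roughly like the following (a sketch; `<pod-name>` is a placeholder for the stuck pod, assumed here to be in the `rook-ceph` namespace):

```console
# <pod-name> is a placeholder for the pod stuck in Terminating
kubectl -n rook-ceph delete pod <pod-name> --grace-period=0 --force
```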
- -Now look at the cluster CRD: - -```console -kubectl -n rook-ceph get cephcluster -``` - -If the cluster CRD still exists even though you have executed the delete command earlier, see the next section on removing the finalizer. - -### Removing the Cluster CRD Finalizer - -When a Cluster CRD is created, a [finalizer](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#finalizers) is added automatically by the Rook operator. The finalizer will allow the operator to ensure that before the cluster CRD is deleted, all block and file mounts will be cleaned up. Without proper cleanup, pods consuming the storage will be hung indefinitely until a system reboot. - -The operator is responsible for removing the finalizer after the mounts have been cleaned up. -If for some reason the operator is not able to remove the finalizer (ie. the operator is not running anymore), you can delete the finalizer manually with the following command: - -```console -for CRD in $(kubectl get crd -n rook-ceph | awk '/ceph.rook.io/ {print $1}'); do - kubectl get -n rook-ceph "$CRD" -o name | \ - xargs -I {} kubectl patch -n rook-ceph {} --type merge -p '{"metadata":{"finalizers": [null]}}' -done -``` - -This command will patch the following CRDs on v1.3: ->``` -> cephblockpools.ceph.rook.io -> cephclients.ceph.rook.io -> cephfilesystems.ceph.rook.io -> cephnfses.ceph.rook.io -> cephobjectstores.ceph.rook.io -> cephobjectstoreusers.ceph.rook.io ->``` - -Within a few seconds you should see that the cluster CRD has been deleted and will no longer block other cleanup such as deleting the `rook-ceph` namespace. - -If the namespace is still stuck in Terminating state, you can check which resources are holding up the deletion and remove the finalizers and delete those - -```console -kubectl api-resources --verbs=list --namespaced -o name \ - | xargs -n 1 kubectl get --show-kind --ignore-not-found -n rook-ceph -``` diff --git a/Documentation/ceph-toolbox.md b/Documentation/ceph-toolbox.md deleted file mode 100644 index 7fd6fdbd2..000000000 --- a/Documentation/ceph-toolbox.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Toolbox -weight: 11100 -indent: true ---- - -# Rook Toolbox - -The Rook toolbox is a container with common tools used for rook debugging and testing. -The toolbox is based on CentOS, so more tools of your choosing can be easily installed with `yum`. - -The toolbox can be run in two modes: -1. [Interactive](#interactive-toolbox): Start a toolbox pod where you can connect and execute Ceph commands from a shell -2. [One-time job](#toolbox-job): Run a script with Ceph commands and collect the results from the job log - -> Prerequisite: Before running the toolbox you should have a running Rook cluster deployed (see the [Quickstart Guide](ceph-quickstart.md)). - -## Interactive Toolbox - -The rook toolbox can run as a deployment in a Kubernetes cluster where you can connect and -run arbitrary Ceph commands. 
- -Save the tools spec as `toolbox.yaml`: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-ceph-tools - namespace: rook-ceph - labels: - app: rook-ceph-tools -spec: - replicas: 1 - selector: - matchLabels: - app: rook-ceph-tools - template: - metadata: - labels: - app: rook-ceph-tools - spec: - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: rook-ceph-tools - image: rook/ceph:v1.7.2 - command: ["/tini"] - args: ["-g", "--", "/usr/local/bin/toolbox.sh"] - imagePullPolicy: IfNotPresent - env: - - name: ROOK_CEPH_USERNAME - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-username - - name: ROOK_CEPH_SECRET - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-secret - volumeMounts: - - mountPath: /etc/ceph - name: ceph-config - - name: mon-endpoint-volume - mountPath: /etc/rook - volumes: - - name: mon-endpoint-volume - configMap: - name: rook-ceph-mon-endpoints - items: - - key: data - path: mon-endpoints - - name: ceph-config - emptyDir: {} - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 5 -``` - -Launch the rook-ceph-tools pod: - -```console -kubectl create -f toolbox.yaml -``` - -Wait for the toolbox pod to download its container and get to the `running` state: - -```console -kubectl -n rook-ceph rollout status deploy/rook-ceph-tools -``` - -Once the rook-ceph-tools pod is running, you can connect to it with: - -```console -kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash -``` - -All available tools in the toolbox are ready for your troubleshooting needs. - -**Example**: - -* `ceph status` -* `ceph osd status` -* `ceph df` -* `rados df` - -When you are done with the toolbox, you can remove the deployment: - -```console -kubectl -n rook-ceph delete deploy/rook-ceph-tools -``` - -## Toolbox Job - -If you want to run Ceph commands as a one-time operation and collect the results later from the -logs, you can run a script as a Kubernetes Job. The toolbox job will run a script that is embedded -in the job spec. The script has the full flexibility of a bash script. - -In this example, the `ceph status` command is executed when the job is created. - -```yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: rook-ceph-toolbox-job - namespace: rook-ceph - labels: - app: ceph-toolbox-job -spec: - template: - spec: - initContainers: - - name: config-init - image: rook/ceph:v1.7.2 - command: ["/usr/local/bin/toolbox.sh"] - args: ["--skip-watch"] - imagePullPolicy: IfNotPresent - env: - - name: ROOK_CEPH_USERNAME - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-username - - name: ROOK_CEPH_SECRET - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-secret - volumeMounts: - - mountPath: /etc/ceph - name: ceph-config - - name: mon-endpoint-volume - mountPath: /etc/rook - containers: - - name: script - image: rook/ceph:v1.7.2 - volumeMounts: - - mountPath: /etc/ceph - name: ceph-config - readOnly: true - command: - - "bash" - - "-c" - - | - # Modify this script to run any ceph, rbd, radosgw-admin, or other commands that could - # be run in the toolbox pod. The output of the commands can be seen by getting the pod log. 
- # - # example: print the ceph status - ceph status - volumes: - - name: mon-endpoint-volume - configMap: - name: rook-ceph-mon-endpoints - items: - - key: data - path: mon-endpoints - - name: ceph-config - emptyDir: {} - restartPolicy: Never -``` - -Create the toolbox job: - -```console -kubectl create -f toolbox-job.yaml -``` - -After the job completes, see the results of the script: - -```console -kubectl -n rook-ceph logs -l job-name=rook-ceph-toolbox-job -``` diff --git a/Documentation/ceph-tools.md b/Documentation/ceph-tools.md deleted file mode 100644 index a715d76f0..000000000 --- a/Documentation/ceph-tools.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Ceph Tools -weight: 11000 ---- - -# Ceph Tools - -Rook provides a number of tools and troubleshooting docs to help you manage your cluster. - -* [Toolbox](ceph-toolbox.md): A pod from which you can run all of the tools to troubleshoot the storage cluster -* [Common Issues](ceph-common-issues.md): Common issues and their potential solutions -* [OSD Management](ceph-osd-mgmt.md): Common configuration issues for Ceph OSDs such as adding and removing storage -* [Direct Tools](direct-tools.md): Run ceph commands to test directly mounting block and file storage -* [Advanced Configuration](ceph-advanced-configuration.md): Tips and tricks for configuring for cluster -* [Openshift Common Issues](ceph-openshift-issues.md): Common troubleshooting tips for OpenShift clusters -* [Disaster Recovery](ceph-disaster-recovery.md): In the worst case scenario if the ceph mons lose quorum, follow these steps to recover diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md deleted file mode 100644 index d14e922f0..000000000 --- a/Documentation/ceph-upgrade.md +++ /dev/null @@ -1,500 +0,0 @@ ---- -title: Upgrades -weight: 3800 -indent: true ---- - -# Rook-Ceph Upgrades - -This guide will walk you through the steps to upgrade the software in a Rook-Ceph cluster from one -version to the next. This includes both the Rook-Ceph operator software itself as well as the Ceph -cluster software. - -Upgrades for both the operator and for Ceph are nearly entirely automated save for where Rook's -permissions need to be explicitly updated by an admin or when incompatibilities need to be addressed -manually due to customizations. - -We welcome feedback and opening issues! - -## Supported Versions - -This guide is for upgrading from **Rook v1.6.x to Rook v1.7.x**. - -Please refer to the upgrade guides from previous releases for supported upgrade paths. -Rook upgrades are only supported between official releases. Upgrades to and from `master` are not -supported. - -For a guide to upgrade previous versions of Rook, please refer to the version of documentation for -those releases. 
- -* [Upgrade 1.5 to 1.6](https://rook.io/docs/rook/v1.6/ceph-upgrade.html) -* [Upgrade 1.4 to 1.5](https://rook.io/docs/rook/v1.5/ceph-upgrade.html) -* [Upgrade 1.3 to 1.4](https://rook.io/docs/rook/v1.4/ceph-upgrade.html) -* [Upgrade 1.2 to 1.3](https://rook.io/docs/rook/v1.3/ceph-upgrade.html) -* [Upgrade 1.1 to 1.2](https://rook.io/docs/rook/v1.2/ceph-upgrade.html) -* [Upgrade 1.0 to 1.1](https://rook.io/docs/rook/v1.1/ceph-upgrade.html) -* [Upgrade 0.9 to 1.0](https://rook.io/docs/rook/v1.0/ceph-upgrade.html) -* [Upgrade 0.8 to 0.9](https://rook.io/docs/rook/v0.9/ceph-upgrade.html) -* [Upgrade 0.7 to 0.8](https://rook.io/docs/rook/v0.8/upgrade.html) -* [Upgrade 0.6 to 0.7](https://rook.io/docs/rook/v0.7/upgrade.html) -* [Upgrade 0.5 to 0.6](https://rook.io/docs/rook/v0.6/upgrade.html) - -## Considerations - -With this upgrade guide, there are a few notes to consider: - -* **WARNING**: Upgrading a Rook cluster is not without risk. There may be unexpected issues or - obstacles that damage the integrity and health of your storage cluster, including data loss. -* The Rook cluster's storage may be unavailable for short periods during the upgrade process for - both Rook operator updates and for Ceph version updates. -* We recommend that you read this document in full before you undertake a Rook cluster upgrade. - -## Patch Release Upgrades - -Unless otherwise noted due to extenuating requirements, upgrades from one patch release of Rook to -another are as simple as updating the common resources and the image of the Rook operator. For -example, when Rook v1.7.2 is released, the process of updating from v1.7.0 is as simple as running -the following: - -First get the latest common resources manifests that contain the latest changes for Rook v1.7. -```sh -git clone --single-branch --depth=1 --branch v1.7.2 https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph -``` - -If you have deployed the Rook Operator or the Ceph cluster into a different namespace than -`rook-ceph`, see the [Update common resources and CRDs](#1-update-common-resources-and-crds) -section for instructions on how to change the default namespaces in `common.yaml`. - -Then apply the latest changes from v1.7 and update the Rook Operator image. -```console -kubectl apply -f common.yaml -f crds.yaml -kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.7.2 -``` - -As exemplified above, it is a good practice to update Rook-Ceph common resources from the example -manifests before any update. The common resources and CRDs might not be updated with every -release, but K8s will only apply updates to the ones that changed. - -Also update optional resources like Prometheus monitoring noted more fully in the -[upgrade section below](#updates-for-optional-resources). - -## Helm - -* The minimum supported Helm version is **v3.2.0** - -If you have installed Rook via the Helm chart, Helm will handle some details of the upgrade for you. -The upgrade steps in this guide will clarify if Helm manages the step for you. - -Helm will **not** update the Ceph version. See [Ceph Version Upgrades](#ceph-version-upgrades) for -instructions on updating the Ceph version. - - -## Upgrading from v1.6 to v1.7 - -**Rook releases from master are expressly unsupported.** It is strongly recommended that you use -[official releases](https://github.com/rook/rook/releases) of Rook. 
Unreleased versions from the -master branch are subject to changes and incompatibilities that will not be supported in the -official releases. Builds from the master branch can have functionality changed or removed at any -time without compatibility support and without prior notice. - -### **Prerequisites** - -We will do all our work in the Ceph example manifests directory. - -```sh -$ cd $YOUR_ROOK_REPO/cluster/examples/kubernetes/ceph/ -``` - -Unless your Rook cluster was created with customized namespaces, namespaces for Rook clusters are -likely to be: - -* Clusters created by v0.7 or earlier: `rook-system` and `rook` -* Clusters created in v0.8 or v0.9: `rook-ceph-system` and `rook-ceph` -* Clusters created in v1.0 or newer: only `rook-ceph` - -With this guide, we do our best not to assume the namespaces in your cluster. To make things as easy -as possible, modify and use the below snippet to configure your environment. We will use these -environment variables throughout this document. - -```sh -# Parameterize the environment -export ROOK_OPERATOR_NAMESPACE="rook-ceph" -export ROOK_CLUSTER_NAMESPACE="rook-ceph" -``` - -In order to successfully upgrade a Rook cluster, the following prerequisites must be met: - -* The cluster should be in a healthy state with full functionality. Review the - [health verification section](#health-verification) in order to verify your cluster is in a good - starting state. -* All pods consuming Rook storage should be created, running, and in a steady state. No Rook - persistent volumes should be in the act of being created or deleted. - -## Health Verification - -Before we begin the upgrade process, let's first review some ways that you can verify the health of -your cluster, ensuring that the upgrade is going smoothly after each step. Most of the health -verification checks for your cluster during the upgrade process can be performed with the Rook -toolbox. For more information about how to run the toolbox, please visit the -[Rook toolbox readme](./ceph-toolbox.md). - -See the common issues pages for troubleshooting and correcting health issues: - -* [General troubleshooting](./common-issues.md) -* [Ceph troubleshooting](./ceph-common-issues.md) - -### **Pods all Running** - -In a healthy Rook cluster, the operator, the agents and all Rook namespace pods should be in the -`Running` state and have few, if any, pod restarts. To verify this, run the following commands: - -```sh -kubectl -n $ROOK_CLUSTER_NAMESPACE get pods -``` - -### **Status Output** - -The Rook toolbox contains the Ceph tools that can give you status details of the cluster with the -`ceph status` command. 
Let's look at an output sample and review some of the details: - -```sh -TOOLS_POD=$(kubectl -n $ROOK_CLUSTER_NAMESPACE get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') -kubectl -n $ROOK_CLUSTER_NAMESPACE exec -it $TOOLS_POD -- ceph status -``` - ->``` -> cluster: -> id: a3f4d647-9538-4aff-9fd1-b845873c3fe9 -> health: HEALTH_OK -> -> services: -> mon: 3 daemons, quorum b,c,a -> mgr: a(active) -> mds: myfs-1/1/1 up {0=myfs-a=up:active}, 1 up:standby-replay -> osd: 6 osds: 6 up, 6 in -> rgw: 1 daemon active -> -> data: -> pools: 9 pools, 900 pgs -> objects: 67 objects, 11 KiB -> usage: 6.1 GiB used, 54 GiB / 60 GiB avail -> pgs: 900 active+clean -> -> io: -> client: 7.4 KiB/s rd, 681 B/s wr, 11 op/s rd, 4 op/s wr -> recovery: 164 B/s, 1 objects/s ->``` - -In the output above, note the following indications that the cluster is in a healthy state: - -* Cluster health: The overall cluster status is `HEALTH_OK` and there are no warning or error status - messages displayed. -* Monitors (mon): All of the monitors are included in the `quorum` list. -* Manager (mgr): The Ceph manager is in the `active` state. -* OSDs (osd): All OSDs are `up` and `in`. -* Placement groups (pgs): All PGs are in the `active+clean` state. -* (If applicable) Ceph filesystem metadata server (mds): all MDSes are `active` for all filesystems -* (If applicable) Ceph object store RADOS gateways (rgw): all daemons are `active` - -If your `ceph status` output has deviations from the general good health described above, there may -be an issue that needs to be investigated further. There are other commands you may run for more -details on the health of the system, such as `ceph osd status`. See the -[Ceph troubleshooting docs](https://docs.ceph.com/docs/master/rados/troubleshooting/) for help. - -Rook will prevent the upgrade of the Ceph daemons if the health is in a `HEALTH_ERR` state. -If you desired to proceed with the upgrade anyway, you will need to set either -`skipUpgradeChecks: true` or `continueUpgradeAfterChecksEvenIfNotHealthy: true` -as described in the [cluster CR settings](https://rook.github.io/docs/rook/v1.7/ceph-cluster-crd.html#cluster-settings). - -### **Container Versions** - -The container version running in a specific pod in the Rook cluster can be verified in its pod spec -output. For example for the monitor pod `mon-b`, we can verify the container version it is running -with the below commands: - -```sh -POD_NAME=$(kubectl -n $ROOK_CLUSTER_NAMESPACE get pod -o custom-columns=name:.metadata.name --no-headers | grep rook-ceph-mon-b) -kubectl -n $ROOK_CLUSTER_NAMESPACE get pod ${POD_NAME} -o jsonpath='{.spec.containers[0].image}' -``` - -The status and container versions for all Rook pods can be collected all at once with the following -commands: - -```sh -kubectl -n $ROOK_OPERATOR_NAMESPACE get pod -o jsonpath='{range .items[*]}{.metadata.name}{"\n\t"}{.status.phase}{"\t\t"}{.spec.containers[0].image}{"\t"}{.spec.initContainers[0]}{"\n"}{end}' && \ -kubectl -n $ROOK_CLUSTER_NAMESPACE get pod -o jsonpath='{range .items[*]}{.metadata.name}{"\n\t"}{.status.phase}{"\t\t"}{.spec.containers[0].image}{"\t"}{.spec.initContainers[0].image}{"\n"}{end}' -``` - -The `rook-version` label exists on Ceph controller resources. For various resource controllers, a -summary of the resource controllers can be gained with the commands below. 
These will report the -requested, updated, and currently available replicas for various Rook-Ceph resources in addition to -the version of Rook for resources managed by the updated Rook-Ceph operator. Note that the operator -and toolbox deployments do not have a `rook-version` label set. - -```sh -kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}' - -kubectl -n $ROOK_CLUSTER_NAMESPACE get jobs -o jsonpath='{range .items[*]}{.metadata.name}{" \tsucceeded: "}{.status.succeeded}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}' -``` - -### **Rook Volume Health** - -Any pod that is using a Rook volume should also remain healthy: - -* The pod should be in the `Running` state with few, if any, restarts -* There should be no errors in its logs -* The pod should still be able to read and write to the attached Rook volume. - -## Rook Operator Upgrade Process - -In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.6.8` to -the version `v1.7.2`. This upgrade should work from any official patch release of Rook v1.6 to any -official patch release of v1.7. - -**Rook release from `master` are expressly unsupported.** It is strongly recommended that you use -[official releases](https://github.com/rook/rook/releases) of Rook. Unreleased versions from the -master branch are subject to changes and incompatibilities that will not be supported in the -official releases. Builds from the master branch can have functionality changed or removed at any -time without compatibility support and without prior notice. - -These methods should work for any number of Rook-Ceph clusters and Rook Operators as long as you -parameterize the environment correctly. Merely repeat these steps for each Rook-Ceph cluster -(`ROOK_CLUSTER_NAMESPACE`), and be sure to update the `ROOK_OPERATOR_NAMESPACE` parameter each time -if applicable. - -Let's get started! - -### **1. Update common resources and CRDs** - -> Automatically updated if you are upgrading via the helm chart - -First apply updates to Rook-Ceph common resources. This includes slightly modified privileges (RBAC) -needed by the Operator. Also update the Custom Resource Definitions (CRDs). - -> **IMPORTANT:** If you are using Kubernetes version v1.15 or lower, you will need to manually -> modify the `common.yaml` file to use -> `rbac.authorization.k8s.io/v1beta1` instead of `rbac.authorization.k8s.io/v1` -> You will also need to apply `pre-k8s-1.16/crds.yaml` instead of `crds.yaml`. - -First get the latest common resources manifests that contain the latest changes. -```sh -git clone --single-branch --depth=1 --branch v1.7.2 https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph -``` - -If you have deployed the Rook Operator or the Ceph cluster into a different namespace than -`rook-ceph`, update the common resource manifests to use your `ROOK_OPERATOR_NAMESPACE` and -`ROOK_CLUSTER_NAMESPACE` using `sed`. -```sh -sed -i.bak \ - -e "s/\(.*\):.*# namespace:operator/\1: $ROOK_OPERATOR_NAMESPACE # namespace:operator/g" \ - -e "s/\(.*\):.*# namespace:cluster/\1: $ROOK_CLUSTER_NAMESPACE # namespace:cluster/g" \ - common.yaml -``` - -Then apply the latest changes. 
-```sh -kubectl apply -f common.yaml -f crds.yaml -``` - -#### **Updates for optional resources** - -If you have [Prometheus monitoring](ceph-monitoring.md) enabled, follow the -step to upgrade the Prometheus RBAC resources as well. - -```sh -kubectl apply -f cluster/examples/kubernetes/ceph/monitoring/rbac.yaml -``` - -### **2. Update Ceph CSI versions** - -> Automatically updated if you are upgrading via the helm chart - -If you have specified custom CSI images in the Rook-Ceph Operator deployment, we recommended you -update to use the latest Ceph-CSI drivers. See the [CSI Version](#csi-version) section for more -details. - -> Note: If using snapshots, refer to the [Upgrade Snapshot API guide](ceph-csi-snapshot.md#upgrade-snapshot-api). - -### **3. Update the Rook Operator** - -> Automatically updated if you are upgrading via the helm chart - -The largest portion of the upgrade is triggered when the operator's image is updated to `v1.7.x`. -When the operator is updated, it will proceed to update all of the Ceph daemons. - -```sh -kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.7.2 -``` - -### **4. Wait for the upgrade to complete** - -Watch now in amazement as the Ceph mons, mgrs, OSDs, rbd-mirrors, MDSes and RGWs are terminated and -replaced with updated versions in sequence. The cluster may be offline very briefly as mons update, -and the Ceph Filesystem may fall offline a few times while the MDSes are upgrading. This is normal. - -The versions of the components can be viewed as they are updated: - -```sh -watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}' -``` - -As an example, this cluster is midway through updating the OSDs. When all deployments report `1/1/1` -availability and `rook-version=v1.7.2`, the Ceph cluster's core components are fully updated. - ->``` ->Every 2.0s: kubectl -n rook-ceph get deployment -o j... -> ->rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.7.2 ->rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.7.2 ->rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.7.2 ->rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.7.2 ->rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.7.2 ->rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.6.8 ->rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.6.8 ->``` - -An easy check to see if the upgrade is totally finished is to check that there is only one -`rook-version` reported across the cluster. - -```console -# kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq -This cluster is not yet finished: - rook-version=v1.6.8 - rook-version=v1.7.2 -This cluster is finished: - rook-version=v1.7.2 -``` - -### **5. Verify the updated cluster** - -At this point, your Rook operator should be running version `rook/ceph:v1.7.2`. - -Verify the Ceph cluster's health using the [health verification section](#health-verification). - - -## Ceph Version Upgrades - -Rook v1.7 supports the following Ceph versions: - - Ceph Pacific 16.2.0 or newer - - Ceph Octopus v15.2.0 or newer - - Ceph Nautilus 14.2.5 or newer - -These are the only supported versions of Ceph. 
Rook v1.8 will no longer support Ceph Nautilus -(14.2.x), and users will have to upgrade Ceph to Octopus (15.2.x) or Pacific (16.2.x) upgrading to -Rook v1.8. - -> **IMPORTANT: When an update is requested, the operator will check Ceph's status, if it is in `HEALTH_ERR` it will refuse to do the upgrade.** - -Rook is cautious when performing upgrades. When an upgrade is requested (the Ceph image has been -updated in the CR), Rook will go through all the daemons one by one and will individually perform -checks on them. It will make sure a particular daemon can be stopped before performing the upgrade. -Once the deployment has been updated, it checks if this is ok to continue. After each daemon is -updated we wait for things to settle (monitors to be in a quorum, PGs to be clean for OSDs, up for -MDSes, etc.), then only when the condition is met we move to the next daemon. We repeat this process -until all the daemons have been updated. - -### **Ceph images** - -Official Ceph container images can be found on [Quay](https://quay.io/repository/ceph/ceph?tab=tags). -Prior to August 2021, official images were on docker.io. While those images will remain on Docker Hub, all new images are being pushed to Quay. - -These images are tagged in a few ways: - -* The most explicit form of tags are full-ceph-version-and-build tags (e.g., `v16.2.5-20210708`). - These tags are recommended for production clusters, as there is no possibility for the cluster to - be heterogeneous with respect to the version of Ceph running in containers. -* Ceph major version tags (e.g., `v16`) are useful for development and test clusters so that the - latest version of Ceph is always available. - -**Ceph containers other than the official images from the registry above will not be supported.** - -### **Example upgrade to Ceph Pacific** - -#### **1. Update the main Ceph daemons** - -The majority of the upgrade will be handled by the Rook operator. Begin the upgrade by changing the -Ceph image field in the cluster CRD (`spec.cephVersion.image`). - -```sh -NEW_CEPH_IMAGE='quay.io/ceph/ceph:v16.2.5-20210708' -CLUSTER_NAME="$ROOK_CLUSTER_NAMESPACE" # change if your cluster name is not the Rook namespace -kubectl -n $ROOK_CLUSTER_NAMESPACE patch CephCluster $CLUSTER_NAME --type=merge -p "{\"spec\": {\"cephVersion\": {\"image\": \"$NEW_CEPH_IMAGE\"}}}" -``` - -#### **2. Wait for the daemon pod updates to complete** - -As with upgrading Rook, you must now wait for the upgrade to complete. Status can be determined in a -similar way to the Rook upgrade as well. - -```sh -watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \tceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' -``` - -Determining when the Ceph has fully updated is rather simple. - -```console -kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | sort | uniq -This cluster is not yet finished: - ceph-version=15.2.13-0 - ceph-version=16.2.5-0 -This cluster is finished: - ceph-version=16.2.5-0 -``` - -#### **3. Verify the updated cluster** - -Verify the Ceph cluster's health using the [health verification section](#health-verification). 
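A quick way to confirm that every daemon is actually running the new Ceph release (a sketch, assuming the toolbox deployment from the toolbox readme is still running) is to ask Ceph itself:

```console
kubectl -n $ROOK_CLUSTER_NAMESPACE exec -it deploy/rook-ceph-tools -- ceph versions
```

All daemons should report the same target version before the upgrade is considered complete.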
- - -## CSI Version - -If you have a cluster running with CSI drivers enabled and you want to configure Rook -to use non-default CSI images, the following settings will need to be applied for the desired -version of CSI. - -The operator configuration variables have recently moved from the operator deployment to the -`rook-ceph-operator-config` ConfigMap. The values in the operator deployment can still be set, -but if the ConfigMap settings are applied, they will override the operator deployment settings. - -```console -kubectl -n $ROOK_OPERATOR_NAMESPACE edit configmap rook-ceph-operator-config -``` - -The default upstream images are included below, which you can change to your desired images. - -```yaml -ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0" -ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" -ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" -ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" -ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" -ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" -CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" -``` - -### **Use default images** - -If you would like Rook to use the inbuilt default upstream images, then you may simply remove all -variables matching `ROOK_CSI_*_IMAGE` from the above ConfigMap and/or the operator deployment. - -### **Verifying updates** - -You can use the below command to see the CSI images currently being used in the cluster. Note that -not all images (like `volumereplication-operator`) may be present in every cluster depending on -which CSI features are enabled. - -```console -kubectl --namespace rook-ceph get pod -o jsonpath='{range .items[*]}{range .spec.containers[*]}{.image}{"\n"}' -l 'app in (csi-rbdplugin,csi-rbdplugin-provisioner,csi-cephfsplugin,csi-cephfsplugin-provisioner)' | sort | uniq -``` - -``` -k8s.gcr.io/sig-storage/csi-attacher:v3.2.1 -k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0 -k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2 -k8s.gcr.io/sig-storage/csi-resizer:v1.2.0 -k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1 -quay.io/cephcsi/cephcsi:v3.4.0 -quay.io/csiaddons/volumereplication-operator:v0.1.0 -``` diff --git a/Documentation/common-issues.md b/Documentation/common-issues.md index ef2f4c66e..9a6054021 100644 --- a/Documentation/common-issues.md +++ b/Documentation/common-issues.md @@ -1,6 +1,6 @@ --- title: Common Issues -weight: 10500 +weight: 8000 --- # Common Issues @@ -8,10 +8,6 @@ weight: 10500 To help troubleshoot your Rook clusters, here are some tips on what information will help solve the issues you might be seeing. If after trying the suggestions found on this page and the problem is not resolved, the Rook team is very happy to help you troubleshoot the issues in their Slack channel. Once you have [registered for the Rook Slack](https://slack.rook.io), proceed to the General channel to ask for assistance. -## Ceph - -For common issues specific to Ceph, see the [Ceph Common Issues](ceph-common-issues.md) page. - # Troubleshooting Techniques Kubernetes status and logs are the the main resources needed to investigate issues in any Rook cluster. 
@@ -22,12 +18,12 @@ Kubernetes status is the first line of investigating when something goes wrong w * Rook pod status: * `kubectl get pod -n -o wide` - * e.g., `kubectl get pod -n rook-ceph -o wide` + * e.g., `kubectl get pod -n rook-cassandra -o wide` * Logs for Rook pods - * Logs for the operator: `kubectl logs -n -l app=` - * e.g., `kubectl logs -n rook-ceph -l app=rook-ceph-operator` - * Logs for a specific pod: `kubectl logs -n `, or a pod using a label such as mon1: `kubectl logs -n -l ` - * e.g., `kubectl logs -n rook-ceph -l mon=a` + * Logs for the operator: `kubectl logs -n -l app=rook-cassandra-cassandra` + * e.g., `kubectl logs -n rook-cassandra -l app=rook-cassandra-operator` + * Logs for a specific pod: `kubectl logs -n `, or a pod using a label such as mon1: `kubectl logs -n rook-cassandra -l ` + * e.g., `kubectl logs -n rook-cassandra -l mon=a` * Logs on a specific node to find why a PVC is failing to mount: * Connect to the node, then get kubelet logs (if your distro is using systemd): `journalctl -u kubelet` * Pods with multiple containers diff --git a/Documentation/development-environment.md b/Documentation/development-environment.md deleted file mode 100644 index 31e2c6efd..000000000 --- a/Documentation/development-environment.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -title: Multi-Node Test Environment -weight: 12100 -indent: true ---- - -# Multi-Node Test Environment - -* [Using KVM/QEMU and Kubespray](#using-kvmqemu-and-kubespray) -* [Using VirtualBox and k8s-vagrant-multi-node](#using-virtualbox-and-k8s-vagrant-multi-node) -* [Using Vagrant on Linux with libvirt](#using-vagrant-on-linux-with-libvirt) -* [Using CodeReady Containers for setting up single node openshift 4.x cluster](#using-codeready-containers-for-setting-up-single-node-openshift-4x-cluster) - -## Using KVM/QEMU and Kubespray - -### Setup expectation - -There are a bunch of pre-requisites to be able to deploy the following environment. Such as: - -* A Linux workstation (CentOS or Fedora) -* KVM/QEMU installation -* docker service allowing insecure local repository - -For other Linux distribution, there is no guarantee the following will work. -However adapting commands (apt/yum/dnf) could just work. - -### Prerequisites installation - -On your host machine, execute `tests/scripts/multi-node/rpm-system-prerequisites.sh` (or -do the equivalent for your distribution) - -Edit `/etc/docker/daemon.json` to add insecure-registries: - -```json -{ - "insecure-registries": ["172.17.8.1:5000"] -} -``` - -### Deploy Kubernetes with Kubespray - -Clone it: - -```console -git clone https://github.com/kubernetes-sigs/kubespray/ -cd kubespray -``` - -Edit `inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml` with: - -```console -docker_options: {% raw %}"--insecure-registry=172.17.8.1:5000 --insecure-registry={{ kube_service_addresses }} --data-root={{ docker_daemon_graph }} {{ docker_log_opts }}"{% endraw %} -``` - -FYI: `172.17.8.1` is the libvirt bridge IP, so it's reachable from all your virtual machines. -This means a registry running on the host machine is reachable from the virtual machines running the Kubernetes cluster. - -Create Vagrant's variable directory: - -```console -mkdir vagrant/ -``` - -Put `tests/scripts/multi-node/config.rb` in `vagrant/`. You can adapt it at will. -Feel free to adapt `num_instances`. - -Deploy! 
- -```console -vagrant up --no-provision ; vagrant provision -``` - -Go grab a coffee: - ->``` ->PLAY RECAP ********************************************************************* ->k8s-01 : ok=351 changed=111 unreachable=0 failed=0 ->k8s-02 : ok=230 changed=65 unreachable=0 failed=0 ->k8s-03 : ok=230 changed=65 unreachable=0 failed=0 ->k8s-04 : ok=229 changed=65 unreachable=0 failed=0 ->k8s-05 : ok=229 changed=65 unreachable=0 failed=0 ->k8s-06 : ok=229 changed=65 unreachable=0 failed=0 ->k8s-07 : ok=229 changed=65 unreachable=0 failed=0 ->k8s-08 : ok=229 changed=65 unreachable=0 failed=0 ->k8s-09 : ok=229 changed=65 unreachable=0 failed=0 -> ->Friday 12 January 2018 10:25:45 +0100 (0:00:00.017) 0:17:24.413 ******** ->=============================================================================== ->download : container_download | Download containers if pull is required or told to always pull (all nodes) - 192.44s ->kubernetes/preinstall : Update package management cache (YUM) --------- 178.26s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) - 102.24s ->docker : ensure docker packages are installed -------------------------- 57.20s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) -- 52.33s ->kubernetes/preinstall : Install packages requirements ------------------ 25.18s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) -- 23.74s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) -- 18.90s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) -- 15.39s ->kubernetes/master : Master | wait for the apiserver to be running ------ 12.44s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) -- 11.83s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) -- 11.66s ->kubernetes/node : install | Copy kubelet from hyperkube container ------ 11.44s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) -- 11.41s ->download : container_download | Download containers if pull is required or told to always pull (all nodes) -- 11.00s ->docker : Docker | pause while Docker restarts >-------------------------- 10.22s ->kubernetes/secrets : Check certs | check if a cert already exists on node --- 6.05s ->kubernetes-apps/network_plugin/flannel : Flannel | Wait for flannel subnet.env file presence --- 5.33s ->kubernetes/master : Master | wait for kube-scheduler -------------------- 5.30s ->kubernetes/master : Copy kubectl from hyperkube container --------------- 4.77s ->``` -```console -vagrant ssh k8s-01 -``` ->``` ->Last login: Fri Jan 12 09:22:18 2018 from 192.168.121.1 ->``` -```console -kubectl get nodes -``` ->``` ->NAME STATUS ROLES AGE VERSION ->k8s-01 Ready master,node 2m v1.9.0+coreos.0 ->k8s-02 Ready node 2m v1.9.0+coreos.0 ->k8s-03 Ready node 2m v1.9.0+coreos.0 ->k8s-04 Ready node 2m v1.9.0+coreos.0 ->k8s-05 Ready node 2m v1.9.0+coreos.0 ->k8s-06 Ready node 2m v1.9.0+coreos.0 ->k8s-07 Ready node 2m v1.9.0+coreos.0 ->k8s-08 Ready node 2m v1.9.0+coreos.0 ->k8s-09 Ready node 2m v1.9.0+coreos.0 ->``` - -### Running the Kubernetes Dashboard UI - -kubespray sets up the Dashboard pod by default, but you must authenticate with a bearer token, even for 
localhost access with kubectl proxy. To allow access, one possible solution is to: - -1) Create an admin user by creating admin-user.yaml with these contents (and using kubectl -f create admin-user.yaml): - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kube-system -``` - -2) Grant that user the ClusterRole authorization by creating and applying admin-user-cluster.role.yaml: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: ServiceAccount - name: admin-user - namespace: kube-system -``` - -3) Find the admin-user token in the kube-system namespace: - -```console -kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') -``` - -and you can use that token to log into the UI at http://localhost:8001/ui. - -(See [https://github.com/kubernetes/dashboard/wiki/Creating-sample-user](https://github.com/kubernetes/dashboard/wiki/Creating-sample-user)) - -### Development workflow on the host - -Everything should happen on the host, your development environment will reside on the host machine NOT inside the virtual machines running the Kubernetes cluster. - -Now, please refer to [https://rook.io/docs/rook/master/development-flow.html](https://rook.io/docs/rook/master/development-flow.html) to setup your development environment (go, git etc). - -At this stage, Rook should be cloned on your host. - -From your Rook repository (should be $GOPATH/src/github.com/rook) location execute `bash tests/scripts/multi-node/build-rook.sh`. -During its execution, `build-rook.sh` will purge all running Rook pods from the cluster, so that your latest container image can be deployed. -Furthermore, **all Ceph data and config will be purged** as well. -Ensure that you are done with all existing state on your test cluster before executing `build-rook.sh` as it will clear everything. - -Each time you build and deploy with `build-rook.sh`, the virtual machines (k8s-0X) will pull the new container image and run your new Rook code. -You can run `bash tests/scripts/multi-node/build-rook.sh` as many times as you want to rebuild your new rook image and redeploy a cluster that is running your new code. - -From here, resume your dev, change your code and test it by running `bash tests/scripts/multi-node/build-rook.sh`. - -### Teardown - -Typically, to flush your environment you will run the following from within kubespray's git repository. -This action will be performed on the host: - -```console -vagrant destroy -f -``` - -Also, if you were using `kubectl` on that host machine, you can resurrect your old configuration by renaming `$HOME/.kube/config.before.rook.$TIMESTAMP` with `$HOME/.kube/config`. - -If you were not using `kubectl`, feel free to simply remove `$HOME/.kube/config.rook`. - -## Using VirtualBox and k8s-vagrant-multi-node - -### Prerequisites - -Be sure to follow the prerequisites here: https://github.com/galexrt/k8s-vagrant-multi-node/tree/master#prerequisites. - -### Quickstart - -To start up the environment just run `./tests/scripts/k8s-vagrant-multi-node.sh up`. -This will bring up one master and 2 workers by default. - -To change the amount of workers to bring up and their resources, be sure to checkout the [galexrt/k8s-vagrant-multi-node project README Variables section](https://github.com/galexrt/k8s-vagrant-multi-node/tree/master#variables). 
-Just set or export the variables as you need on the script, e.g., either `NODE_COUNT=5 ./tests/scripts/k8s-vagrant-multi-node.sh up`, or `export NODE_COUNT=5` and then `./tests/scripts/k8s-vagrant-multi-node.sh up`. - -For more information or if you are experiencing issues, please create an issue at [GitHub galexrt/k8s-vagrant-multi-node](https://github.com/galexrt/k8s-vagrant-multi-node). - -## Using Vagrant on Linux with libvirt - -See https://github.com/noahdesu/kubensis. - -## Using CodeReady Containers for setting up single node openshift 4.x cluster - -See https://code-ready.github.io/crc/ diff --git a/Documentation/development-flow.md b/Documentation/development-flow.md index 414d589e4..655449fcf 100644 --- a/Documentation/development-flow.md +++ b/Documentation/development-flow.md @@ -10,7 +10,7 @@ don't hesitate to reach out to us on our [Slack](https://Rook-io.slack.com) dev ## Prerequisites -1. [GO 1.13](https://golang.org/dl/) or greater installed +1. [GO 1.16](https://golang.org/dl/) or greater installed 2. Git client installed 3. Github account @@ -18,7 +18,7 @@ don't hesitate to reach out to us on our [Slack](https://Rook-io.slack.com) dev ### Create a Fork -From your browser navigate to [http://github.com/rook/rook](http://github.com/rook/rook) and click the "Fork" button. +From your browser navigate to [http://github.com/rook/cassandra](http://github.com/rook/cassandra) and click the "Fork" button. ### Clone Your Fork @@ -32,22 +32,16 @@ mkdir -p $GOPATH/src/github.com/rook cd $GOPATH/src/github.com/rook # Clone your fork, where is your GitHub account name -$ git clone https://github.com//rook.git +$ git clone https://github.com//cassandra.git cd rook ``` ### Build +Build the Cassandra operator image: + ```console -# build all rook storage providers make - -# build a single storage provider, where the IMAGES can be a subdirectory of the "images" folder: -# "cassandra", "ceph", or "nfs" -make IMAGES="cassandra" build - -# multiple storage providers can also be built -make IMAGES="cassandra ceph" build ``` If you want to use `podman` instead of `docker` then uninstall `docker` packages from your machine, make will automatically pick up `podman`. 
@@ -87,7 +81,7 @@ First you will need to add the upstream remote to your local git: ```console # Add 'upstream' to the list of remotes -git remote add upstream https://github.com/rook/rook.git +git remote add upstream https://github.com/rook/cassandra.git # Verify the remote was added git remote -v @@ -103,36 +97,26 @@ A source code layout is shown below, annotated with comments about the use of ea rook ├── build # build makefiles and logic to build, publish and release all Rook artifacts ├── cluster -│   ├── charts # Helm charts -│   │   └── rook-ceph │   └── examples # Sample yaml files for Rook cluster │ ├── cmd # Binaries with main entrypoint -│   ├── rook # Main command entry points for operators and daemons -│   └── rookflex # Main command entry points for Rook flexvolume driver +│   └── rook # Main command entry points for operators and daemons │ -├── design # Design documents for the various components of the Rook project +├── design # Design documents ├── Documentation # Rook project Documentation ├── images # Dockerfiles to build images for all supported storage providers │ ├── pkg │   ├── apis -│   │   ├── ceph.rook.io # ceph specific specs for cluster, file, object -│   │   │   ├── v1 -│   │   ├── nfs.rook.io # nfs server specific specs +│   │   ├── cassandra.rook.io # cassandra server specific specs │   │   │   └── v1alpha1 │   │   └── rook.io # rook.io API group of common types │   │   └── v1alpha2 │   ├── client # auto-generated strongly typed client code to access Rook APIs │   ├── clusterd -│   ├── daemon # daemons for each storage provider -│   │   ├── ceph -│   │   └── discover -│   ├── operator # all orchestration logic and custom controllers for each storage provider -│   │   ├── ceph -│   │   ├── discover +│   ├── operator # all orchestration logic and custom controllers for the cassandra operator +│   │   ├── cassandra │   │   ├── k8sutil -│   │   ├── nfs │   │   └── test │   ├── test │   ├── util @@ -143,9 +127,7 @@ rook    │   ├── installer # installs Rook and its supported storage providers into integration tests environments    │   └── utils    ├── integration # all test cases that will be invoked during integration testing -    ├── longhaul # longhaul tests    └── scripts # scripts for setting up integration and manual testing environments - ``` ## Development @@ -177,7 +159,7 @@ Submit a pull request for the design to be discussed and approved by the communi An issue should be opened to track the work of authoring and completing the design document. This issue is in addition to the issue that is tracking the implementation of the feature. -The [design label](https://github.com/rook/rook/labels/design) should be assigned to the issue to denote it as such. +The [design label](https://github.com/rook/cassandra/labels/design) should be assigned to the issue to denote it as such. ### Create a Branch @@ -243,13 +225,13 @@ go tool cover -html=coverage.out -o coverage.html #### Running the Integration Tests For instructions on how to execute the end to end smoke test suite, -follow the [test instructions](https://github.com/rook/rook/blob/master/tests/README.md). +follow the [test instructions](https://github.com/rook/cassandra/blob/master/tests/README.md). ### Commit structure Rook maintainers value clear, lengthy and explanatory commit messages. So by default each of your commits must: -* be prefixed by the component it's affecting, if Ceph, then the title of the commit message should be `ceph: my commit title`. If not the commit-lint bot will complain. 
+* be prefixed by the component it's affecting * contain a commit message which explains the original issue and how it was fixed if a bug. If a feature it is a full description of the new functionality. * refer to the issue it's closing, this is mandatory when fixing a bug @@ -265,7 +247,7 @@ component: commit title This is the commit message, here I'm explaining, what the bug was along with its root cause. Then I'm explaining how I fixed it. -Closes: https://github.com/rook/rook/issues/ +Closes: https://github.com/rook/cassandra/issues/ Signed-off-by: First Name Last Name ``` @@ -273,11 +255,9 @@ The `component` **MUST** be one of the following: - bot - build - cassandra -- ceph - ci - core - docs -- nfs - test Note: sometimes you will feel like there is not so much to say, for instance if you are fixing a typo in a text. @@ -305,7 +285,7 @@ Once your commit history is clean, ensure you have based on the [latest upstream ### Submitting -Go to the [Rook github](https://www.github.com/rook/rook) to open the PR. If you have pushed recently, you should see an obvious link to open the PR. If you have not pushed recently, go to the Pull Request tab and select your fork and branch for the PR. +Go to the [Rook github](https://www.github.com/rook/cassandra) to open the PR. If you have pushed recently, you should see an obvious link to open the PR. If you have not pushed recently, go to the Pull Request tab and select your fork and branch for the PR. After the PR is open, you can make changes simply by pushing new commits. Your PR will track the changes in your fork and update automatically. @@ -321,14 +301,3 @@ The flow for getting a fix into a release branch is: 3. After your PR is merged to master, the mergify bot will automatically open a PR with your commits backported to the release branch 4. If there are any conflicts you will need to resolve them by pulling the branch, resolving the conflicts and force push back the branch 5. After the CI is green, the bot will automatically merge the backport PR. - -## Debugging operators locally - -Operators are meant to be run inside a Kubernetes cluster. However, this makes it harder to use debugging tools and slows down the developer cycle of edit-build-test since testing requires to build a container image, push to the cluster, restart the pods, get logs, etc. - -A common operator developer practice is to run the operator locally on the developer machine in order to leverage the developer tools and comfort. - -In order to support this external operator mode, rook detects if the operator is running outside of the cluster (using standard cluster env) and changes the behavior as follows: - -* Connecting to Kubernetes API will load the config from the user `~/.kube/config`. -* Instead of the default [CommandExecutor](../pkg/util/exec/exec.go) this mode uses a [TranslateCommandExecutor](../pkg/util/exec/translate_exec.go) that executes every command issued by the operator to run as a Kubernetes job inside the cluster, so that any tools that the operator needs from its image can be called. diff --git a/Documentation/direct-tools.md b/Documentation/direct-tools.md deleted file mode 100644 index 62aa95cc4..000000000 --- a/Documentation/direct-tools.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Direct Tools -weight: 11200 -indent: true ---- - -# Direct Tools - -Rook is designed with Kubernetes design principles from the ground up. 
This topic is going to escape the bounds of Kubernetes storage and show you how to -use block and file storage directly from a pod without any of the Kubernetes magic. The purpose of this topic is to help you quickly test a new configuration, -although it is not meant to be used in production. All of the benefits of Kubernetes storage including failover, detach, and attach will not be available. -If your pod dies, your mount will die with it. - -## Start the Direct Mount Pod - -To test mounting your Ceph volumes, start a pod with the necessary mounts. An example is provided in the examples test directory: - -```console -kubectl create -f cluster/examples/kubernetes/ceph/direct-mount.yaml -``` - -After the pod is started, connect to it like this: - -```console -kubectl -n rook-ceph get pod -l app=rook-direct-mount -$ kubectl -n rook-ceph exec -it bash -``` - -## Block Storage Tools - -After you have created a pool as described in the [Block Storage](ceph-block.md) topic, you can create a block image and mount it directly in a pod. -This example will show how the Ceph rbd volume can be mounted in the direct mount pod. - -Create the [Direct Mount Pod](direct-tools.md#Start-the-Direct-Mount-Pod). - -Create a volume image (10MB): - -```console -rbd create replicapool/test --size 10 -rbd info replicapool/test - -# Disable the rbd features that are not in the kernel module -rbd feature disable replicapool/test fast-diff deep-flatten object-map -``` - -Map the block volume and format it and mount it: - -```console -# Map the rbd device. If the Direct Mount Pod was started with "hostNetwork: false" this hangs and you have to stop it with Ctrl-C, -# however the command still succeeds; see https://github.com/rook/rook/issues/2021 -rbd map replicapool/test - -# Find the device name, such as rbd0 -lsblk | grep rbd - -# Format the volume (only do this the first time or you will lose data) -mkfs.ext4 -m0 /dev/rbd0 - -# Mount the block device -mkdir /tmp/rook-volume -mount /dev/rbd0 /tmp/rook-volume -``` - -Write and read a file: - -```console -echo "Hello Rook" > /tmp/rook-volume/hello -cat /tmp/rook-volume/hello -``` - -### Unmount the Block device - -Unmount the volume and unmap the kernel device: - -```console -umount /tmp/rook-volume -rbd unmap /dev/rbd0 -``` - -## Shared Filesystem Tools - -After you have created a filesystem as described in the [Shared Filesystem](ceph-filesystem.md) topic, you can mount the filesystem from multiple pods. -The the other topic you may have mounted the filesystem already in the registry pod. Now we will mount the same filesystem in the Direct Mount pod. -This is just a simple way to validate the Ceph filesystem and is not recommended for production Kubernetes pods. - -Follow [Direct Mount Pod](direct-tools.md#Start-the-Direct-Mount-Pod) to start a pod with the necessary mounts and then proceed with the following commands after connecting to the pod. - -```console -# Create the directory -mkdir /tmp/registry - -# Detect the mon endpoints and the user secret for the connection -mon_endpoints=$(grep mon_host /etc/ceph/ceph.conf | awk '{print $3}') -my_secret=$(grep key /etc/ceph/keyring | awk '{print $3}') - -# Mount the filesystem -mount -t ceph -o mds_namespace=myfs,name=admin,secret=$my_secret $mon_endpoints:/ /tmp/registry - -# See your mounted filesystem -df -h -``` - -Now you should have a mounted filesystem. If you have pushed images to the registry you will see a directory called `docker`. 
- -```console -ls /tmp/registry -``` - -Try writing and reading a file to the shared filesystem. - -```console -echo "Hello Rook" > /tmp/registry/hello -cat /tmp/registry/hello - -# delete the file when you're done -rm -f /tmp/registry/hello -``` - -### Unmount the Filesystem - -To unmount the shared filesystem from the Direct Mount Pod: - -```console -umount /tmp/registry -rmdir /tmp/registry -``` - -No data will be deleted by unmounting the filesystem. diff --git a/Documentation/flexvolume.md b/Documentation/flexvolume.md deleted file mode 100644 index 58af53e38..000000000 --- a/Documentation/flexvolume.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: FlexVolume Configuration -weight: 1200 -indent: true ---- - -# Ceph FlexVolume Configuration - -FlexVolume is not enabled by default since Rook v1.1. This documentation applies only if you have enabled FlexVolume. - -If enabled, Rook uses [FlexVolume](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md) to integrate with Kubernetes for performing storage operations. In some operating systems where Kubernetes is deployed, the [default Flexvolume plugin directory](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md#prerequisites) (the directory where FlexVolume drivers are installed) is **read-only**. - -Some Kubernetes deployments require you to configure kubelet with a FlexVolume plugin directory that is accessible and read/write (`rw`). These steps need to be carried out on **all nodes** in your cluster. Rook needs to be told where this directory is in order for the volume plugin to work. - -Platform-specific instructions for the following Kubernetes deployment platforms are linked below - -* [Default FlexVolume path](#default-flexvolume-path) -* [Atomic](#atomic) -* [Azure AKS](#azure-aks) -* [ContainerLinux](#containerlinux) -* [Google Kubernetes Engine (GKE)](#google-kubernetes-engine-gke) -* [Kubespray](#kubespray) -* [OpenShift](#openshift) -* [OpenStack Magnum](#openstack-magnum) -* [Rancher](#rancher) -* [Tectonic](#tectonic) -* [Custom containerized kubelet](#custom-containerized-kubelet) -* [Configuring the FlexVolume path](#configuring-the-flexvolume-path) - -## Default FlexVolume path - -If you are not using a platform that is listed above and the path `/usr/libexec/kubernetes/kubelet-plugins/volume/exec/` is read/write, you don't need to configure anything. - -That is because `/usr/libexec/kubernetes/kubelet-plugins/volume/exec/` is the kubelet default FlexVolume path and Rook assumes the default FlexVolume path if not set differently. - -If running `mkdir -p /usr/libexec/kubernetes/kubelet-plugins/volume/exec/` should give you an error about read-only filesystem, you need to use [another read/write FlexVolume path](#other-common-readwrite-flexvolume-paths) and configure it on the Rook operator and kubelet. - -These are the other commonly used paths: - -* `/var/lib/kubelet/volumeplugins` -* `/var/lib/kubelet/volume-plugins` - -Continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## Atomic - -See the [OpenShift](#openshift) section, unless running with OpenStack Magnum, then see [OpenStack Magnum](#openstack-magnum) section. - -## Azure AKS - -AKS uses a non-standard FlexVolume plugin directory: `/etc/kubernetes/volumeplugins` -The kubelet on AKS is already configured to use that directory. 
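If you want to verify this yourself (assuming you can open a shell on an AKS node, for example via SSH or `kubectl debug node/<node>`), the kubelet flag and directory can be inspected directly; this is only an optional sanity check:

```console
# Confirm the kubelet is running with the AKS FlexVolume plugin directory
ps aux | grep kubelet | grep -- --volume-plugin-dir

# The directory itself should exist and be writable
ls -ld /etc/kubernetes/volumeplugins
```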
- -Continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## ContainerLinux - -Use the [Most common read/write FlexVolume path](#most-common-readwrite-flexvolume-path) for the next steps. - -The kubelet's systemD unit file can be located at: `/etc/systemd/system/kubelet.service`. - -Continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## Google Kubernetes Engine (GKE) - -Google's Kubernetes Engine uses a non-standard FlexVolume plugin directory: `/home/kubernetes/flexvolume` -The kubelet on GKE is already configured to use that directory. - -Continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## Kubespray - -### Prior to v2.11.0 - -Kubespray uses the [kubelet_flexvolumes_plugins_dir](https://github.com/kubernetes-sigs/kubespray/blob/v2.11.0/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml#L206) variable to define where it sets the plugin directory. - -Kubespray prior to the v2.11.0 release [used a non-standard FlexVolume plugin directory](https://github.com/kubernetes-sigs/kubespray/blob/f47a66622743aa31970cebeca7968a0939cb700d/roles/kubernetes/node/defaults/main.yml#L53): `/var/lib/kubelet/volume-plugins`. -The Kubespray configured kubelet is already configured to use that directory. - -If you are using kubespray v2.10.x or older, continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -### As of v2.11.0 and newer - -Kubespray v2.11.0 included https://github.com/kubernetes-sigs/kubespray/pull/4752 which sets the same plugin directory assumed by rook by default: `/usr/libexec/kubernetes/kubelet-plugins/volume/exec`. - -No special configuration of the directory is needed in Rook unless: - -* Kubespray is deployed onto a platform where the default path is not writable, or -* you have explicitly defined a custom path in the [kubelet_flexvolumes_plugins_dir](https://github.com/kubernetes-sigs/kubespray/blob/v2.11.0/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml#L206) variable - -If you have not defined one, and the default path is not writable, the [alternate configuration](https://github.com/kubernetes-sigs/kubespray/blob/v2.11.0/roles/kubernetes/preinstall/tasks/0040-set_facts.yml#L189) is `/var/lib/kubelet/volumeplugins` - -If needed, continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## OpenShift - -To find out which FlexVolume directory path you need to set on the Rook operator, please look at the OpenShift docs of the version you are using, [latest OpenShift Flexvolume docs](https://docs.openshift.org/latest/install_config/persistent_storage/persistent_storage_flex_volume.html#flexvolume-installation) (they also contain the FlexVolume path for Atomic). - -Continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## OpenStack Magnum - -OpenStack Magnum is using Atomic, which uses a non-standard FlexVolume plugin directory at: `/var/lib/kubelet/volumeplugins` -The kubelet in OpenStack Magnum is already configured to use that directory. -You will need to use this value when [configuring the Rook operator](#configuring-the-rook-operator) - -## Rancher - -Rancher provides an easy way to configure kubelet. 
The FlexVolume flag will be shown later on in the [configuring the FlexVolume path](#configuring-the-flexvolume-path). -It can be provided to the kubelet configuration template at deployment time or by using the `up to date` feature if Kubernetes is already deployed. - -Rancher deploys kubelet as a docker container, you need to mount the host's flexvolume path into the kubelet image as a volume, -this can be done in the `extra_binds` section of the kubelet cluster config. - -Configure the Rancher deployed kubelet by updating the `cluster.yml` file kubelet section: - -```yaml -services: - kubelet: - extra_args: - volume-plugin-dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec - extra_binds: - - /usr/libexec/kubernetes/kubelet-plugins/volume/exec:/usr/libexec/kubernetes/kubelet-plugins/volume/exec -``` - -If you're using [rke](https://github.com/rancher/rke), run `rke up`, this will update and restart your kubernetes cluster system components, in this case the kubelet docker instance(s) -will get restarted with the new volume bind and volume plugin dir flag. - -The default FlexVolume path for Rancher is `/usr/libexec/kubernetes/kubelet-plugins/volume/exec` which is also the default -FlexVolume path for the Rook operator. - -If the default path as above is used no further configuration is required, otherwise if a different path is used -the Rook operator will need to be reconfigured, to do this continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## Tectonic - -Follow [these instructions](tectonic.md) to configure the Flexvolume plugin for Rook on Tectonic during ContainerLinux node ignition file provisioning. -If you want to use Rook with an already provisioned Tectonic cluster, please refer to the [ContainerLinux](#containerlinux) section. - -Continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## Custom containerized kubelet - -Use the [most common read/write FlexVolume path](#most-common-readwrite-flexvolume-path) for the next steps. - -If your kubelet is running as a (Docker, rkt, etc) container you need to make sure that this directory from the host is reachable by the kubelet inside the container. - -Continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - -## Configuring the FlexVolume path - -If the environment specific section doesn't mention a FlexVolume path in this doc or external docs, please refer to the [most common read/write FlexVolume path](#most-common-readwrite-flexvolume-path) section, before continuing to [configuring the FlexVolume path](#configuring-the-flexvolume-path). - -### Configuring the Rook operator - -You must provide the above found FlexVolume path when deploying the [rook-operator](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/operator.yaml) by setting the environment variable `FLEXVOLUME_DIR_PATH`. - -**Example**: - -```yaml -spec: - template: - spec: - containers: -[...] - - name: rook-ceph-operator - env: -[...] - - name: FLEXVOLUME_DIR_PATH - value: "/var/lib/kubelet/volumeplugins" -[...] 
-``` - -(In the `operator.yaml` manifest replace `` with the path or if you use helm set the `agent.flexVolumeDirPath` to the FlexVolume path) - -### Configuring the Kubernetes kubelet - -You need to add the flexvolume flag with the path to all nodes's kubelet in the Kubernetes cluster: - -```console ---volume-plugin-dir=PATH_TO_FLEXVOLUME -``` - -(Where the `PATH_TO_FLEXVOLUME` is the above found FlexVolume path) - -The location where you can set the kubelet FlexVolume path (flag) depends on your platform. -Please refer to your platform documentation for that and/or the [platform specific FlexVolume path](#platform-specific-flexvolume-path) for information about that. - -After adding the flag to kubelet, kubelet must be restarted for it to pick up the new flag. diff --git a/Documentation/helm-ceph-cluster.md b/Documentation/helm-ceph-cluster.md deleted file mode 100644 index 07fc35a4a..000000000 --- a/Documentation/helm-ceph-cluster.md +++ /dev/null @@ -1,167 +0,0 @@ ---- -title: Ceph Cluster -weight: 10200 -indent: true ---- - -{% include_relative branch.liquid %} - -# Ceph Cluster Helm Chart - -Creates Rook resources to configure a [Ceph](https://ceph.io/) cluster using the [Helm](https://helm.sh) package manager. -This chart is a simple packaging of templates that will optionally create Rook resources such as: -- CephCluster, CephFilesystem, and CephObjectStore CRs -- Storage classes to expose Ceph RBD volumes, CephFS volumes, and RGW buckets -- Ingress for external access to the dashboard -- Toolbox - -## Prerequisites - -* Kubernetes 1.13+ -* Helm 3.x -* Preinstalled Rook Operator. See the [Helm Operator](helm-operator.md) topic to install. - -## Installing - -The `helm install` command deploys rook on the Kubernetes cluster in the default configuration. -The [configuration](#configuration) section lists the parameters that can be configured during installation. It is -recommended that the rook operator be installed into the `rook-ceph` namespace. The clusters can be installed -into the same namespace as the operator or a separate namespace. - -Rook currently publishes builds of this chart to the `release` and `master` channels. - -**Before installing, review the values.yaml to confirm if the default settings need to be updated.** -* If the operator was installed in a namespace other than `rook-ceph`, the namespace - must be set in the `operatorNamespace` variable. -* Set the desired settings in the `cephClusterSpec`. The [defaults](https://github.com/rook/rook/tree/{{ branchName }}/cluster/charts/rook-ceph-cluster/values.yaml) - are only an example and not likely to apply to your cluster. -* The `monitoring` section should be removed from the `cephClusterSpec`, as it is specified separately in the helm settings. -* The default values for `cephBlockPools`, `cephFileSystems`, and `CephObjectStores` will create one of each, and their corresponding storage classes. - -### Release - -The release channel is the most recent release of Rook that is considered stable for the community. - -The example install assumes you have created a values-override.yaml. - -```console -helm repo add rook-release https://charts.rook.io/release -helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \ - --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f values-override.yaml -``` - -## Configuration - -The following tables lists the configurable parameters of the rook-operator chart and their default values. 
- -| Parameter | Description | Default | -| ---------------------- | -------------------------------------------------------------------- | ----------- | -| `operatorNamespace` | Namespace of the Rook Operator | `rook-ceph` | -| `configOverride` | Cluster ceph.conf override | | -| `toolbox.enabled` | Enable Ceph debugging pod deployment. See [toolbox](ceph-toolbox.md) | `false` | -| `toolbox.tolerations` | Toolbox tolerations | `[]` | -| `toolbox.affinity` | Toolbox affinity | `{}` | -| `monitoring.enabled` | Enable Prometheus integration, will also create necessary RBAC rules | `false` | -| `cephClusterSpec.*` | Cluster configuration, see below | See below | -| `ingress.dashboard` | Enable an ingress for the ceph-dashboard | `{}` | -| `cephBlockPools.[*]` | A list of CephBlockPool configurations to deploy | See below | -| `cephFileSystems.[*]` | A list of CephFileSystem configurations to deploy | See below | -| `cephObjectStores.[*]` | A list of CephObjectStore configurations to deploy | See below | - -### Ceph Cluster Spec - -The `CephCluster` CRD takes its spec from `cephClusterSpec.*`. This is not an exhaustive list of parameters. -For the full list, see the [Cluster CRD](ceph-cluster-crd.md) topic. - -### Ceph Block Pools - -The `cephBlockPools` array in the values file will define a list of CephBlockPool as described in the table below. - -| Parameter | Description | Default | -| ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | -| `name` | The name of the CephBlockPool | `ceph-blockpool` | -| `spec` | The CephBlockPool spec, see the [CephBlockPool](ceph-pool-crd.md#spec) documentation. | `{}` | -| `storageClass.enabled` | Whether a storage class is deployed alongside the CephBlockPool | `true` | -| `storageClass.isDefault` | Whether the storage class will be the default storage class for PVCs. See the PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) documentation for details. | `true` | -| `storageClass.name` | The name of the storage class | `ceph-block` | -| `storageClass.parameters` | See [Block Storage](ceph-block.md) documentation or the helm values.yaml for suitable values | see values.yaml | -| `storageClass.reclaimPolicy` | The default [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) to apply to PVCs created with this storage class. | `Delete` | -| `storageClass.allowVolumeExpansion` | Whether [volume expansion](https://kubernetes.io/docs/concepts/storage/storage-classes/#allow-volume-expansion) is allowed by default. | `true` | - -### Ceph File Systems - -The `cephFileSystems` array in the values file will define a list of CephFileSystem as described in the table below. - -| Parameter | Description | Default | -| ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | -| `name` | The name of the CephFileSystem | `ceph-filesystem` | -| `spec` | The CephFileSystem spec, see the [CephFilesystem CRD](ceph-filesystem-crd.md) documentation. 
| see values.yaml | -| `storageClass.enabled` | Whether a storage class is deployed alongside the CephFileSystem | `true` | -| `storageClass.name` | The name of the storage class | `ceph-filesystem` | -| `storageClass.parameters` | See [Shared Filesystem](ceph-filesystem.md) documentation or the helm values.yaml for suitable values | see values.yaml | -| `storageClass.reclaimPolicy` | The default [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) to apply to PVCs created with this storage class. | `Delete` | - -### Ceph Object Stores - -The `cephObjectStores` array in the values file will define a list of CephObjectStore as described in the table below. - -| Parameter | Description | Default | -| ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -| `name` | The name of the CephObjectStore | `ceph-objectstore` | -| `spec` | The CephObjectStore spec, see the [CephObjectStore CRD](ceph-object-store-crd.md) documentation. | see values.yaml | -| `storageClass.enabled` | Whether a storage class is deployed alongside the CephObjectStore | `true` | -| `storageClass.name` | The name of the storage class | `ceph-bucket` | -| `storageClass.parameters` | See [Object Store storage class](ceph-object-bucket-claim.md) documentation or the helm values.yaml for suitable values | see values.yaml | -| `storageClass.reclaimPolicy` | The default [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) to apply to PVCs created with this storage class. | `Delete` | - -### Existing Clusters - -If you have an existing CephCluster CR that was created without the helm chart and you want the helm -chart to start managing the cluster: - -1. Extract the `spec` section of your existing CephCluster CR and copy to the `cephClusterSpec` - section in `values-override.yaml`. - -2. Add the following annotations and label to your existing CephCluster CR: - -``` - annotations: - meta.helm.sh/release-name: rook-ceph-cluster - meta.helm.sh/release-namespace: rook-ceph - labels: - app.kubernetes.io/managed-by: Helm -``` - -1. Run the `helm install` command in the [Installing section](#release) to create the chart. - -2. In the future when updates to the cluster are needed, ensure the values-override.yaml always - contains the desired CephCluster spec. - -### Development Build - -To deploy from a local build from your development environment: - -```console -cd cluster/charts/rook-ceph-cluster -helm install --create-namespace --namespace rook-ceph rook-ceph-cluster -f values-override.yaml . -``` - -## Uninstalling the Chart - -To see the currently installed Rook chart: - -```console -helm ls --namespace rook-ceph -``` - -To uninstall/delete the `rook-ceph-cluster` chart: - -```console -helm delete --namespace rook-ceph rook-ceph-cluster -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. Removing the cluster -chart does not remove the Rook operator. In addition, all data on hosts in the Rook data directory -(`/var/lib/rook` by default) and on OSD raw devices is kept. To reuse disks, you will have to wipe them before recreating the cluster. - -See the [teardown documentation](ceph-teardown.md) for more information. 
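The note above about wiping disks before recreating the cluster is easy to overlook. As a minimal sketch only — the device name `/dev/sdX` is an assumption, and this irreversibly destroys whatever is on it — clearing a raw OSD device for reuse could look like:

```console
# DANGER: wipes the device completely; double-check the device name first
sgdisk --zap-all /dev/sdX
dd if=/dev/zero of=/dev/sdX bs=1M count=100 oflag=direct,dsync
```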
diff --git a/Documentation/helm-operator.md b/Documentation/helm-operator.md deleted file mode 100644 index 7049ca665..000000000 --- a/Documentation/helm-operator.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Ceph Operator -weight: 10100 -indent: true ---- - -{% include_relative branch.liquid %} - -# Ceph Operator Helm Chart - -Installs [rook](https://github.com/rook/rook) to create, configure, and manage Ceph clusters on Kubernetes. - -## Introduction - -This chart bootstraps a [rook-ceph-operator](https://github.com/rook/rook) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -## Prerequisites - -* Kubernetes 1.13+ -* Helm 3.x - -See the [Helm support matrix](https://helm.sh/docs/topics/version_skew/) for more details. - -## Installing - -The Ceph Operator helm chart will install the basic components necessary to create a storage platform for your Kubernetes cluster. -1. Install the Helm chart -1. [Create a Rook cluster](ceph-quickstart.md#create-a-rook-cluster). - -The `helm install` command deploys rook on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. It is recommended that the rook operator be installed into the `rook-ceph` namespace (you will install your clusters into separate namespaces). - -Rook currently publishes builds of the Ceph operator to the `release` and `master` channels. - -### Release - -The release channel is the most recent release of Rook that is considered stable for the community. - -```console -helm repo add rook-release https://charts.rook.io/release -helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph -``` - -### Development Build - -To deploy from a local build from your development environment: - -1. Build the Rook docker image: `make` -1. Copy the image to your K8s cluster, such as with the `docker save` then the `docker load` commands -1. Install the helm chart: - -```console -cd cluster/charts/rook-ceph -kubectl create namespace rook-ceph -helm install --namespace rook-ceph rook-ceph . -``` - -## Uninstalling the Chart - -To see the currently installed Rook chart: - -```console -helm ls --namespace rook-ceph -``` - -To uninstall/delete the `rook-ceph` deployment: - -```console -helm delete --namespace rook-ceph rook-ceph -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -After uninstalling you may want to clean up the CRDs as described on the [teardown documentation](ceph-teardown.md#removing-the-cluster-crd-finalizer). - -## Configuration - -The following tables lists the configurable parameters of the rook-operator chart and their default values. - -| Parameter | Description | Default | -| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------- | -| `image.repository` | Image | `rook/ceph` | -| `image.tag` | Image tag | `master` | -| `image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `crds.enabled` | If true, the helm chart will create the Rook CRDs. Do NOT change to `false` in a running cluster or CRs will be deleted! 
| `true` | -| `rbacEnable` | If true, create & use RBAC resources | `true` | -| `pspEnable` | If true, create & use PSP resources | `true` | -| `resources` | Pod resource requests & limits | `{}` | -| `annotations` | Pod annotations | `{}` | -| `logLevel` | Global log level | `INFO` | -| `nodeSelector` | Kubernetes `nodeSelector` to add to the Deployment. | | -| `tolerations` | List of Kubernetes `tolerations` to add to the Deployment. | `[]` | -| `unreachableNodeTolerationSeconds` | Delay to use for the node.kubernetes.io/unreachable pod failure toleration to override the Kubernetes default of 5 minutes | `5s` | -| `currentNamespaceOnly` | Whether the operator should watch cluster CRD in its own namespace or not | `false` | -| `hostpathRequiresPrivileged` | Runs Ceph Pods as privileged to be able to write to `hostPath`s in OpenShift with SELinux restrictions. | `false` | -| `discover.priorityClassName` | The priority class name to add to the discover pods | | -| `discover.toleration` | Toleration for the discover pods | | -| `discover.tolerationKey` | The specific key of the taint to tolerate | | -| `discover.tolerations` | Array of tolerations in YAML format which will be added to discover deployment | | -| `discover.nodeAffinity` | The node labels for affinity of `discover-agent` (***) | | -| `discover.podLabels` | Labels to add to the discover pods. | | -| `csi.enableRbdDriver` | Enable Ceph CSI RBD driver. | `true` | -| `csi.enableCephfsDriver` | Enable Ceph CSI CephFS driver. | `true` | -| `csi.enableCephfsSnapshotter` | Enable Snapshotter in CephFS provisioner pod. | `true` | -| `csi.enableRBDSnapshotter` | Enable Snapshotter in RBD provisioner pod. | `true` | -| `csi.pluginPriorityClassName` | PriorityClassName to be set on csi driver plugin pods. | | -| `csi.provisionerPriorityClassName` | PriorityClassName to be set on csi driver provisioner pods. | | -| `csi.enableOMAPGenerator` | EnableOMAP generator deploys omap sidecar in CSI provisioner pod, to enable it set it to true | `false` | -| `csi.rbdFSGroupPolicy` | Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted | ReadWriteOnceWithFSType | -| `csi.cephFSFSGroupPolicy` | Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted | `None` | -| `csi.logLevel` | Set logging level for csi containers. Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. | `0` | -| `csi.enableGrpcMetrics` | Enable Ceph CSI GRPC Metrics. | `false` | -| `csi.enableCSIHostNetwork` | Enable Host Networking for Ceph CSI nodeplugins. | `false` | -| `csi.provisionerTolerations` | Array of tolerations in YAML format which will be added to CSI provisioner deployment. | | -| `csi.provisionerNodeAffinity` | The node labels for affinity of the CSI provisioner deployment (***) | | -| `csi.pluginTolerations` | Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet | | -| `csi.pluginNodeAffinity` | The node labels for affinity of the CephCSI plugin DaemonSet (***) | | -| `csi.rbdProvisionerTolerations` | Array of tolerations in YAML format which will be added to CephCSI RBD provisioner deployment. 
| | -| `csi.rbdProvisionerNodeAffinity` | The node labels for affinity of the CephCSI RBD provisioner deployment (***) | | -| `csi.rbdPluginTolerations` | Array of tolerations in YAML format which will be added to CephCSI RBD plugin DaemonSet | | -| `csi.rbdPluginNodeAffinity` | The node labels for affinity of the CephCSI RBD plugin DaemonSet (***) | | -| `csi.cephFSProvisionerTolerations` | Array of tolerations in YAML format which will be added to CephCSI CephFS provisioner deployment. | | -| `csi.cephFSProvisionerNodeAffinity` | The node labels for affinity of the CephCSI CephFS provisioner deployment (***) | | -| `csi.cephFSPluginTolerations` | Array of tolerations in YAML format which will be added to CephCSI CephFS plugin DaemonSet | | -| `csi.cephFSPluginNodeAffinity` | The node labels for affinity of the CephCSI CephFS plugin DaemonSet (***) | | -| `csi.csiRBDProvisionerResource` | CEPH CSI RBD provisioner resource requirement list. | | -| `csi.csiRBDPluginResource` | CEPH CSI RBD plugin resource requirement list. | | -| `csi.csiCephFSProvisionerResource` | CEPH CSI CephFS provisioner resource requirement list. | | -| `csi.csiCephFSPluginResource` | CEPH CSI CephFS plugin resource requirement list. | | -| `csi.cephfsGrpcMetricsPort` | CSI CephFS driver GRPC metrics port. | `9091` | -| `csi.cephfsLivenessMetricsPort` | CSI CephFS driver metrics port. | `9081` | -| `csi.rbdGrpcMetricsPort` | Ceph CSI RBD driver GRPC metrics port. | `9090` | -| `csi.rbdLivenessMetricsPort` | Ceph CSI RBD driver metrics port. | `8080` | -| `csi.forceCephFSKernelClient` | Enable Ceph Kernel clients on kernel < 4.17 which support quotas for Cephfs. | `true` | -| `csi.kubeletDirPath` | Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag) | `/var/lib/kubelet` | -| `csi.cephcsi.image` | Ceph CSI image. | `quay.io/cephcsi/cephcsi:v3.4.0` | -| `csi.rbdPluginUpdateStrategy` | CSI Rbd plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` | -| `csi.cephFSPluginUpdateStrategy` | CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` | -| `csi.registrar.image` | Kubernetes CSI registrar image. | `k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0` | -| `csi.resizer.image` | Kubernetes CSI resizer image. | `k8s.gcr.io/sig-storage/csi-resizer:v1.2.0` | -| `csi.provisioner.image` | Kubernetes CSI provisioner image. | `k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2` | -| `csi.snapshotter.image` | Kubernetes CSI snapshotter image. | `k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1` | -| `csi.attacher.image` | Kubernetes CSI Attacher image. | `k8s.gcr.io/sig-storage/csi-attacher:v3.2.1` | -| `csi.cephfsPodLabels` | Labels to add to the CSI CephFS Pods. | | -| `csi.rbdPodLabels` | Labels to add to the CSI RBD Pods. | | -| `csi.volumeReplication.enabled` | Enable Volume Replication. | `false` | -| `csi.volumeReplication.image` | Volume Replication Controller image. | `quay.io/csiaddons/volumereplication-operator:v0.1.0` | -| `agent.flexVolumeDirPath` | Path where the Rook agent discovers the flex volume plugins (*) | `/usr/libexec/kubernetes/kubelet-plugins/volume/exec/` | -| `agent.libModulesDirPath` | Path where the Rook agent should look for kernel modules (*) | `/lib/modules` | -| `agent.mounts` | Additional paths to be mounted in the agent container (**) | | -| `agent.mountSecurityMode` | Mount Security Mode for the agent. 
| `Any` | -| `agent.priorityClassName` | The priority class name to add to the agent pods | | -| `agent.toleration` | Toleration for the agent pods | | -| `agent.tolerationKey` | The specific key of the taint to tolerate | | -| `agent.tolerations` | Array of tolerations in YAML format which will be added to agent deployment | | -| `agent.nodeAffinity` | The node labels for affinity of `rook-agent` (***) | | -| `admissionController.tolerations` | Array of tolerations in YAML format which will be added to admission controller deployment. | | -| `admissionController.nodeAffinity` | The node labels for affinity of the admission controller deployment (***) | | -| `allowMultipleFilesystems` | **(experimental)** Allows multiple filesystems to be deployed to a Ceph cluster. Octopus (v15) or Nautilus (v14) | `false` | - -* For information on what to set `agent.flexVolumeDirPath` to, please refer to the [Rook flexvolume documentation](flexvolume.md) - -* * `agent.mounts` should have this format `mountname1=/host/path:/container/path,mountname2=/host/path2:/container/path2` - -* * * `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_) - -### Command Line - -You can pass the settings with helm command line parameters. Specify each parameter using the -`--set key=value[,key=value]` argument to `helm install`. - -### Settings File - -Alternatively, a yaml file that specifies the values for the above parameters (`values.yaml`) can be provided while installing the chart. - -```console -helm install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f values.yaml -``` - -For example settings, see [values.yaml](https://github.com/rook/rook/tree/{{ branchName }}/cluster/charts/rook-ceph/values.yaml) diff --git a/Documentation/helm.md b/Documentation/helm.md deleted file mode 100644 index 4b919d778..000000000 --- a/Documentation/helm.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Helm Charts -weight: 10000 ---- - -{% include_relative branch.liquid %} - -# Helm Charts - -Rook has published the following Helm charts for the Ceph storage provider: - -* [Rook Ceph Operator](helm-operator.md): Starts the Ceph Operator, which will watch for Ceph CRs (custom resources) -* [Rook Ceph Cluster](helm-ceph-cluster.md): Creates Ceph CRs that the operator will use to configure the cluster - -The Helm charts are intended to simplify deployment and upgrades. -Configuring the Rook resources without Helm is also fully supported by creating the -[manifests](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes) -directly. diff --git a/Documentation/k8s-pre-reqs.md b/Documentation/k8s-pre-reqs.md index df45dc19f..0e2b174e0 100644 --- a/Documentation/k8s-pre-reqs.md +++ b/Documentation/k8s-pre-reqs.md @@ -7,153 +7,11 @@ weight: 1000 # Prerequisites Rook can be installed on any existing Kubernetes cluster as long as it meets the minimum version -and Rook is granted the required privileges (see below for more information). If you don't have a Kubernetes cluster, -you can quickly set one up using [Minikube](#minikube), [Kubeadm](#kubeadm) or [CoreOS/Vagrant](#new-local-kubernetes-cluster-with-vagrant). +and Rook is granted the required privileges (see below for more information). ## Minimum Version -Kubernetes **v1.11** or higher is supported for the Ceph operator. -Kubernetes **v1.16** or higher is supported for the Cassandra and NFS operators. 
- -**Important** If you are using K8s 1.15 or older, you will need to create a different version of the Ceph CRDs. Create the `crds.yaml` found in the [pre-k8s-1.16](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/pre-k8s-1.16) subfolder of the example manifests. - -## Ceph Prerequisites - -See also **[Ceph Prerequisites](ceph-prerequisites.md)**. - -## Pod Security Policies - -Rook requires privileges to manage the storage in your cluster. If you have Pod Security Policies enabled -please review this section. By default, Kubernetes clusters do not have PSPs enabled so you may -be able to skip this section. - -If you are configuring Ceph on OpenShift, the Ceph walkthrough will configure the PSPs as well -when you start the operator with [operator-openshift.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator-openshift.yaml). - -### Cluster Role - -> **NOTE**: Cluster role configuration is only needed when you are not already `cluster-admin` in your Kubernetes cluster! - -Creating the Rook operator requires privileges for setting up RBAC. To launch the operator you need to have created your user certificate that is bound to ClusterRole `cluster-admin`. - -One simple way to achieve it is to assign your certificate with the `system:masters` group: - -```console --subj "/CN=admin/O=system:masters" -``` - -`system:masters` is a special group that is bound to `cluster-admin` ClusterRole, but it can't be easily revoked so be careful with taking that route in a production setting. -Binding individual certificate to ClusterRole `cluster-admin` is revocable by deleting the ClusterRoleBinding. - -### RBAC for PodSecurityPolicies - -If you have activated the [PodSecurityPolicy Admission Controller](https://kubernetes.io/docs/admin/admission-controllers/#podsecuritypolicy) and thus are -using [PodSecurityPolicies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/), you will require additional `(Cluster)RoleBindings` -for the different `ServiceAccounts` Rook uses to start the Rook Storage Pods. - -Security policies will differ for different backends. See Ceph's Pod Security Policies set up in -[common.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/common.yaml) -for an example of how this is done in practice. - -### PodSecurityPolicy - -You need at least one `PodSecurityPolicy` that allows privileged `Pod` execution. Here is an example -which should be more permissive than is needed for any backend: - -```yaml -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: privileged -spec: - fsGroup: - rule: RunAsAny - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' - allowedCapabilities: - - '*' - hostPID: true - # hostNetwork is required for using host networking - hostNetwork: false -``` - -**Hint**: Allowing `hostNetwork` usage is required when using `hostNetwork: true` in a Cluster `CustomResourceDefinition`! -You are then also required to allow the usage of `hostPorts` in the `PodSecurityPolicy`. The given -port range will allow all ports: - -```yaml - hostPorts: - # Ceph msgr2 port - - min: 1 - max: 65535 -``` - -## Authenticated docker registries - -If you want to use an image from authenticated docker registry (e.g. for image cache/mirror), you'll need to -add an `imagePullSecret` to all relevant service accounts. 
This way all pods created by the operator (for service account: -`rook-ceph-system`) or all new pods in the namespace (for service account: `default`) will have the `imagePullSecret` added -to their spec. - -The whole process is described in the [official kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account). - -### Example setup for a ceph cluster - -To get you started, here's a quick rundown for the ceph example from the [quickstart guide](/Documentation/ceph-quickstart.md). - -First, we'll create the secret for our registry as described [here](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod): - -```console -# for namespace rook-ceph -$ kubectl -n rook-ceph create secret docker-registry my-registry-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL - -# and for namespace rook-ceph (cluster) -$ kubectl -n rook-ceph create secret docker-registry my-registry-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL -``` - -Next we'll add the following snippet to all relevant service accounts as described [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account): - -```yaml -imagePullSecrets: -- name: my-registry-secret -``` - -The service accounts are: - -* `rook-ceph-system` (namespace: `rook-ceph`): Will affect all pods created by the rook operator in the `rook-ceph` namespace. -* `default` (namespace: `rook-ceph`): Will affect most pods in the `rook-ceph` namespace. -* `rook-ceph-mgr` (namespace: `rook-ceph`): Will affect the MGR pods in the `rook-ceph` namespace. -* `rook-ceph-osd` (namespace: `rook-ceph`): Will affect the OSD pods in the `rook-ceph` namespace. - -You can do it either via e.g. `kubectl -n edit serviceaccount default` or by modifying the [`operator.yaml`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/operator.yaml) -and [`cluster.yaml`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster.yaml) before deploying them. - -Since it's the same procedure for all service accounts, here is just one example: - -```console -kubectl -n rook-ceph edit serviceaccount default -``` - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: default - namespace: rook-ceph -secrets: -- name: default-token-12345 -imagePullSecrets: # here are the new -- name: my-registry-secret # parts -``` - -After doing this for all service accounts all pods should be able to pull the image from your registry. +Kubernetes **v1.16** or higher is supported for the Cassandra operator. 
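A quick way to confirm your cluster meets this minimum (assuming `kubectl` is already pointed at it) is to check the reported versions:

```console
# The server version must report v1.16 or newer
kubectl version

# The VERSION column shows the kubelet version on each node
kubectl get nodes
```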
## Bootstrapping Kubernetes diff --git a/Documentation/media/ceph-dashboard.png b/Documentation/media/ceph-dashboard.png deleted file mode 100644 index f55da2e76..000000000 Binary files a/Documentation/media/ceph-dashboard.png and /dev/null differ diff --git a/Documentation/media/edgefs-isgw-edit.png b/Documentation/media/edgefs-isgw-edit.png deleted file mode 100644 index 8bbce92f1..000000000 Binary files a/Documentation/media/edgefs-isgw-edit.png and /dev/null differ diff --git a/Documentation/media/edgefs-isgw.png b/Documentation/media/edgefs-isgw.png deleted file mode 100644 index af413c741..000000000 Binary files a/Documentation/media/edgefs-isgw.png and /dev/null differ diff --git a/Documentation/media/edgefs-rook.png b/Documentation/media/edgefs-rook.png deleted file mode 100644 index a62a6f9d9..000000000 Binary files a/Documentation/media/edgefs-rook.png and /dev/null differ diff --git a/Documentation/media/edgefs-ui-dashboard.png b/Documentation/media/edgefs-ui-dashboard.png deleted file mode 100644 index 31c589b4e..000000000 Binary files a/Documentation/media/edgefs-ui-dashboard.png and /dev/null differ diff --git a/Documentation/media/edgefs-ui-nfs-edit.png b/Documentation/media/edgefs-ui-nfs-edit.png deleted file mode 100644 index fad41d819..000000000 Binary files a/Documentation/media/edgefs-ui-nfs-edit.png and /dev/null differ diff --git a/Documentation/media/kubernetes.png b/Documentation/media/kubernetes.png deleted file mode 100644 index 6372a1f7a..000000000 Binary files a/Documentation/media/kubernetes.png and /dev/null differ diff --git a/Documentation/media/minio_demo.png b/Documentation/media/minio_demo.png deleted file mode 100644 index 64c6dfad0..000000000 Binary files a/Documentation/media/minio_demo.png and /dev/null differ diff --git a/Documentation/media/nfs-webhook-deployment.png b/Documentation/media/nfs-webhook-deployment.png deleted file mode 100644 index df3cfe4b4..000000000 Binary files a/Documentation/media/nfs-webhook-deployment.png and /dev/null differ diff --git a/Documentation/media/nfs-webhook-validation-flow.png b/Documentation/media/nfs-webhook-validation-flow.png deleted file mode 100644 index 67f7c8a61..000000000 Binary files a/Documentation/media/nfs-webhook-validation-flow.png and /dev/null differ diff --git a/Documentation/media/prometheus-execute-metric-cursor.png b/Documentation/media/prometheus-execute-metric-cursor.png deleted file mode 100644 index 992a866c7..000000000 Binary files a/Documentation/media/prometheus-execute-metric-cursor.png and /dev/null differ diff --git a/Documentation/media/prometheus-graph.png b/Documentation/media/prometheus-graph.png deleted file mode 100644 index 6ce929f1d..000000000 Binary files a/Documentation/media/prometheus-graph.png and /dev/null differ diff --git a/Documentation/media/prometheus-metric-cursor-graph.png b/Documentation/media/prometheus-metric-cursor-graph.png deleted file mode 100644 index 10bb95736..000000000 Binary files a/Documentation/media/prometheus-metric-cursor-graph.png and /dev/null differ diff --git a/Documentation/media/prometheus-metric-cursor.png b/Documentation/media/prometheus-metric-cursor.png deleted file mode 100644 index 4985af49e..000000000 Binary files a/Documentation/media/prometheus-metric-cursor.png and /dev/null differ diff --git a/Documentation/media/prometheus-monitor.png b/Documentation/media/prometheus-monitor.png deleted file mode 100644 index 1f161ac07..000000000 Binary files a/Documentation/media/prometheus-monitor.png and /dev/null differ diff --git 
a/Documentation/media/rook-architecture.png b/Documentation/media/rook-architecture.png deleted file mode 100644 index 2118717cf..000000000 Binary files a/Documentation/media/rook-architecture.png and /dev/null differ diff --git a/Documentation/nfs-crd.md b/Documentation/nfs-crd.md deleted file mode 100644 index 72b355f08..000000000 --- a/Documentation/nfs-crd.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: NFS Server CRD -weight: 8000 ---- - -# NFS Server CRD - -NFS Server can be created and configured using the `nfsservers.nfs.rook.io` custom resource definition (CRD). -Please refer to the [user guide walk-through](nfs.md) for complete instructions. -This page will explain all the available configuration options on the NFS CRD. - -## Sample - -The parameters to configure the NFS CRD are demonstrated in the example below which is followed by a table that explains the parameters in more detail. - -Below is a very simple example that shows sharing a volume (which could be hostPath, cephFS, cephRBD, googlePD, EBS, etc.) using NFS, without any client or per export based configuration. - -For a `PersistentVolumeClaim` named `googlePD-claim`, which has Read/Write permissions and no squashing, the NFS CRD instance would look like the following: - -```yaml -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: nfs-share - server: - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: googlePD-claim - # A key/value list of annotations - annotations: - # key: value -``` - -## Settings - -The table below explains in detail each configuration option that is available in the NFS CRD. - -| Parameter | Description | Default | -| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -| `replicas` | The number of NFS daemon to start | `1` | -| `annotations` | Key value pair list of annotations to add. | `[]` | -| `exports` | Parameters for creating an export | `` | -| `exports.name` | Name of the volume being shared | `` | -| `exports.server` | NFS server configuration | `` | -| `exports.server.accessMode` | Volume access modes (Reading and Writing) for the share (Valid options are `ReadOnly`, `ReadWrite` and `none`) | `ReadWrite` | -| `exports.server.squash` | This prevents root users connected remotely from having root privileges (valid options are `none`, `rootId`, `root` and `all`) | `none` | -| `exports.server.allowedClients` | Access configuration for clients that can consume the NFS volume | `` | -| `exports.server.allowedClients.name` | Name of the host/hosts | `` | -| `exports.server.allowedClients.clients` | The host or network to which the export is being shared. Valid entries for this field are host names, IP addresses, netgroups, and CIDR network addresses. | `` | -| `exports.server.allowedClients.accessMode` | Reading and Writing permissions for the client* (valid options are same as `exports.server.accessMode`) | `ReadWrite` | -| `exports.server.allowedClients.squash` | Squash option for the client* (valid options are same as `exports.server.squash`) | `none` | -| `exports.persistentVolumeClaim` | The PVC that will serve as the backing volume to be exported by the NFS server. Any PVC is allowed, such as host paths, CephFS, Ceph RBD, Google PD, Amazon EBS, etc.. 
| `` | -| `exports.persistentVolumeClaim.claimName` | Name of the PVC | `` | - -*note: if `exports.server.allowedClients.accessMode` and `exports.server.allowedClients.squash` options are specified, `exports.server.accessMode` and `exports.server.squash` are overridden respectively. - -Description for `volumes.allowedClients.squash` valid options are: - -| Option | Description | -| -------- | --------------------------------------------------------------------------------- | -| `none` | No user id squashing is performed | -| `rootId` | UID `0` and GID `0` are squashed to the anonymous uid and anonymous GID. | -| `root` | UID `0` and GID of any value are squashed to the anonymous uid and anonymous GID. | -| `all` | All users are squashed | - -The volume that needs to be exported by NFS must be attached to NFS server pod via PVC. Examples of volume that can be attached are Host Path, AWS Elastic Block Store, GCE Persistent Disk, CephFS, RBD etc. The limitations of these volumes also apply while they are shared by NFS. The limitation and other details about these volumes can be found [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). - -## Examples - -This section contains some examples for more advanced scenarios and configuration options. - -### Single volume exported for access by multiple clients - -This example shows how to share a volume with different options for different clients accessing the share. -The EBS volume (represented by a PVC) will be exported by the NFS server for client access as `/nfs-share` (note that this PVC must already exist). - -The following client groups are allowed to access this share: - -* `group1` with IP address `172.17.0.5` will be given Read Only access with the root user squashed. -* `group2` includes both the network range of `172.17.0.5/16` and a host named `serverX`. They will all be granted Read/Write permissions with no user squash. - -```yaml -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: nfs-share - server: - allowedClients: - - name: group1 - clients: 172.17.0.5 - accessMode: ReadOnly - squash: root - - name: group2 - clients: - - 172.17.0.0/16 - - serverX - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: ebs-claim -``` - -### Multiple volumes - -This section provides an example of how to share multiple volumes from one NFS server. -These volumes can all be different types (e.g., Google PD and Ceph RBD). -Below we will share an Amazon EBS volume as well as a CephFS volume, using differing configuration for the two: - -* The EBS volume is named `share1` and is available for all clients with Read Only access and no squash. -* The CephFS volume is named `share2` and is available for all clients with Read/Write access and no squash. 
- -```yaml -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-multi-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: share1 - server: - allowedClients: - - name: ebs-host - clients: all - accessMode: ReadOnly - squash: none - persistentVolumeClaim: - claimName: ebs-claim - - name: share2 - server: - allowedClients: - - name: ceph-host - clients: all - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: cephfs-claim -``` diff --git a/Documentation/nfs.md b/Documentation/nfs.md deleted file mode 100644 index 3656c97c7..000000000 --- a/Documentation/nfs.md +++ /dev/null @@ -1,602 +0,0 @@ ---- -title: Network Filesystem (NFS) -weight: 800 -indent: true ---- -{% include_relative branch.liquid %} - -# Network Filesystem (NFS) - -NFS allows remote hosts to mount filesystems over a network and interact with those filesystems as though they are mounted locally. This enables system administrators to consolidate resources onto centralized servers on the network. - -## Prerequisites - -1. A Kubernetes cluster (v1.16 or higher) is necessary to run the Rook NFS operator. To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [follow these instructions](k8s-pre-reqs.md). -2. The desired volume to export needs to be attached to the NFS server pod via a [PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). -Any type of PVC can be attached and exported, such as Host Path, AWS Elastic Block Store, GCP Persistent Disk, CephFS, Ceph RBD, etc. -The limitations of these volumes also apply while they are shared by NFS. -You can read further about the details and limitations of these volumes in the [Kubernetes docs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). -3. NFS client packages must be installed on all nodes where Kubernetes might run pods with NFS mounted. Install `nfs-utils` on CentOS nodes or `nfs-common` on Ubuntu nodes. - -## Deploy NFS Operator - -First deploy the Rook NFS operator using the following commands: - -```console -$ git clone --single-branch --branch v1.7.2 https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/nfs -kubectl create -f crds.yaml -kubectl create -f operator.yaml -``` - -You can check if the operator is up and running with: - -```console -kubectl -n rook-nfs-system get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-nfs-operator-879f5bf8b-gnwht 1/1 Running 0 29m ->``` - -## Deploy NFS Admission Webhook (Optional) - -Admission webhooks are HTTP callbacks that receive admission requests to the API server. Two types of admission webhooks is validating admission webhook and mutating admission webhook. NFS Operator support validating admission webhook which validate the NFSServer object sent to the API server before stored in the etcd (persisted). - -To enable admission webhook on NFS such as validating admission webhook, you need to do as following: - -First, ensure that `cert-manager` is installed. If it is not installed yet, you can install it as described in the `cert-manager` [installation](https://cert-manager.io/docs/installation/kubernetes/) documentation. Alternatively, you can simply just run the single command below: - -```console -kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.1/cert-manager.yaml -``` - -This will easily get the latest version (`v0.15.1`) of `cert-manager` installed. 
After that completes, make sure the cert-manager component deployed properly and is in the `Running` status: - -```console -kubectl get -n cert-manager pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->cert-manager-7747db9d88-jmw2f 1/1 Running 0 2m1s ->cert-manager-cainjector-87c85c6ff-dhtl8 1/1 Running 0 2m1s ->cert-manager-webhook-64dc9fff44-5g565 1/1 Running 0 2m1s ->``` - -Once `cert-manager` is running, you can now deploy the NFS webhook: - -```console -kubectl create -f webhook.yaml -``` - -Verify the webhook is up and running: - -```console -kubectl -n rook-nfs-system get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-nfs-operator-78d86bf969-k7lqp 1/1 Running 0 102s ->rook-nfs-webhook-74749cbd46-6jw2w 1/1 Running 0 102s ->``` - -## Create Openshift Security Context Constraints (Optional) - -On OpenShift clusters, we will need to create some additional security context constraints. If you are **not** running in OpenShift you can skip this and go to the [next section](#create-and-initialize-nfs-server). - -To create the security context constraints for nfs-server pods, we can use the following yaml, which is also found in `scc.yaml` under `/cluster/examples/kubernetes/nfs`. - -> *NOTE: Older versions of OpenShift may require ```apiVersion: v1```* - -```yaml -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-nfs -allowHostDirVolumePlugin: true -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegedContainer: false -allowedCapabilities: -- SYS_ADMIN -- DAC_READ_SEARCH -defaultAddCapabilities: null -fsGroup: - type: MustRunAs -priority: null -readOnlyRootFilesystem: false -requiredDropCapabilities: -- KILL -- MKNOD -- SYS_CHROOT -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- secret -users: - - system:serviceaccount:rook-nfs:rook-nfs-server -``` - -You can create scc with following command: - -```console -oc create -f scc.yaml -``` - -## Create Pod Security Policies (Recommended) - -We recommend you to create Pod Security Policies as well - -```yaml -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: rook-nfs-policy -spec: - privileged: true - fsGroup: - rule: RunAsAny - allowedCapabilities: - - DAC_READ_SEARCH - - SYS_RESOURCE - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - hostPath -``` - -Save this file with name `psp.yaml` and create with following command: - -```console -kubectl create -f psp.yaml -``` - -## Create and Initialize NFS Server - -Now that the operator is running, we can create an instance of a NFS server by creating an instance of the `nfsservers.nfs.rook.io` resource. -The various fields and options of the NFS server resource can be used to configure the server and its volumes to export. -Full details of the available configuration options can be found in the [NFS CRD documentation](nfs-crd.md). 
- -Before we create NFS Server we need to create `ServiceAccount` and `RBAC` rules - -```yaml ---- -apiVersion: v1 -kind: Namespace -metadata: - name: rook-nfs ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-server - namespace: rook-nfs ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] - - apiGroups: [""] - resources: ["services", "endpoints"] - verbs: ["get"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - resourceNames: ["rook-nfs-policy"] - verbs: ["use"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: - - nfs.rook.io - resources: - - "*" - verbs: - - "*" ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -subjects: - - kind: ServiceAccount - name: rook-nfs-server - # replace with namespace where provisioner is deployed - namespace: rook-nfs -roleRef: - kind: ClusterRole - name: rook-nfs-provisioner-runner - apiGroup: rbac.authorization.k8s.io -``` - -Save this file with name `rbac.yaml` and create with following command: - -```console -kubectl create -f rbac.yaml -``` - -This guide has 3 main examples that demonstrate exporting volumes with a NFS server: - -1. [Default StorageClass example](#default-storageclass-example) -1. [XFS StorageClass example](#xfs-storageclass-example) -1. [Rook Ceph volume example](#rook-ceph-volume-example) - -### Default StorageClass example - -This first example will walk through creating a NFS server instance that exports storage that is backed by the default `StorageClass` for the environment you happen to be running in. -In some environments, this could be a host path, in others it could be a cloud provider virtual disk. -Either way, this example requires a default `StorageClass` to exist. - -Start by saving the below NFS CRD instance definition to a file called `nfs.yaml`: - -```yaml ---- -# A default storageclass must be present -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-default-claim - namespace: rook-nfs -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. - persistentVolumeClaim: - claimName: nfs-default-claim - # A key/value list of annotations - annotations: - rook: nfs -``` - -With the `nfs.yaml` file saved, now create the NFS server as shown: - -```console -kubectl create -f nfs.yaml -``` - -### XFS StorageClass example - -Rook NFS support disk quota through `xfs_quota`. So if you need specify disk quota for your volumes you can follow this example. - -In this example, we will use an underlying volume mounted as `xfs` with `prjquota` option. Before you can create that underlying volume, you need to create `StorageClass` with `xfs` filesystem and `prjquota` mountOptions. 
Many distributed storage providers for Kubernetes support `xfs` filesystem. Typically by defining `fsType: xfs` or `fs: xfs` in storageClass parameters. But actually how to specify storage-class filesystem type is depend on the storage providers it self. You can see https://kubernetes.io/docs/concepts/storage/storage-classes/ for more details. - -Here is example `StorageClass` for GCE PD and AWS EBS - -- GCE PD - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: standard-xfs -parameters: - type: pd-standard - fsType: xfs -mountOptions: - - prjquota -provisioner: kubernetes.io/gce-pd -reclaimPolicy: Delete -volumeBindingMode: Immediate -allowVolumeExpansion: true -``` - -- AWS EBS - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: standard-xfs -provisioner: kubernetes.io/aws-ebs -parameters: - type: io1 - iopsPerGB: "10" - fsType: xfs -mountOptions: - - prjquota -reclaimPolicy: Delete -volumeBindingMode: Immediate -``` - -Once you already have `StorageClass` with `xfs` filesystem and `prjquota` mountOptions you can create NFS server instance with the following example. - -```yaml ---- -# A storage class with name standard-xfs must be present. -# The storage class must be has xfs filesystem type and prjquota mountOptions. -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-xfs-claim - namespace: rook-nfs -spec: - storageClassName: "standard-xfs" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. - persistentVolumeClaim: - claimName: nfs-xfs-claim - # A key/value list of annotations - annotations: - rook: nfs -``` - -Save this PVC and NFS Server instance as `nfs-xfs.yaml` and create with following command. - -```console -kubectl create -f nfs-xfs.yaml -``` - -### Rook Ceph volume example - -In this alternative example, we will use a different underlying volume as an export for the NFS server. -These steps will walk us through exporting a Ceph RBD block volume so that clients can access it across the network. - -First, you have to [follow these instructions](ceph-quickstart.md) to deploy a sample Rook Ceph cluster that can be attached to the NFS server pod for sharing. -After the Rook Ceph cluster is up and running, we can create proceed with creating the NFS server. - -Save this PVC and NFS Server instance as `nfs-ceph.yaml`: - -```yaml ---- -# A rook ceph cluster must be running -# Create a rook ceph cluster using examples in rook/cluster/examples/kubernetes/ceph -# Refer to https://rook.io/docs/rook/master/ceph-quickstart.html for a quick rook cluster setup -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-ceph-claim - namespace: rook-nfs -spec: - storageClassName: rook-ceph-block - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. 
- # Create a Ceph cluster for using this example - # Create a ceph PVC after creating the rook ceph cluster using ceph-pvc.yaml - persistentVolumeClaim: - claimName: nfs-ceph-claim - # A key/value list of annotations - annotations: - rook: nfs -``` - -Create the NFS server instance that you saved in `nfs-ceph.yaml`: - -```console -kubectl create -f nfs-ceph.yaml -``` - -### Verify NFS Server - -We can verify that a Kubernetes object has been created that represents our new NFS server and its export with the command below. - -```console -kubectl -n rook-nfs get nfsservers.nfs.rook.io -``` - ->``` ->NAME AGE STATE ->rook-nfs 32s Running ->``` - -Verify that the NFS server pod is up and running: - -```console -kubectl -n rook-nfs get pod -l app=rook-nfs -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-nfs-0 1/1 Running 0 2m ->``` - -If the NFS server pod is in the `Running` state, then we have successfully created an exported NFS share that clients can start to access over the network. - - -## Accessing the Export - -Since Rook version v1.0, Rook supports dynamic provisioning of NFS. -This example will be showing how dynamic provisioning feature can be used for nfs. - -Once the NFS Operator and an instance of NFSServer is deployed. A storageclass similar to below example has to be created to dynamically provisioning volumes. - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: rook-nfs-share1 -parameters: - exportName: share1 - nfsServerName: rook-nfs - nfsServerNamespace: rook-nfs -provisioner: nfs.rook.io/rook-nfs-provisioner -reclaimPolicy: Delete -volumeBindingMode: Immediate -``` - -You can save it as a file, eg: called `sc.yaml` Then create storageclass with following command. - -```console -kubectl create -f sc.yaml -``` - -> **NOTE**: The StorageClass need to have the following 3 parameters passed. -> -1. `exportName`: It tells the provisioner which export to use for provisioning the volumes. -2. `nfsServerName`: It is the name of the NFSServer instance. -3. `nfsServerNamespace`: It namespace where the NFSServer instance is running. - -Once the above storageclass has been created, you can create a PV claim referencing the storageclass as shown in the example given below. - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rook-nfs-pv-claim -spec: - storageClassName: "rook-nfs-share1" - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi -``` - -You can also save it as a file, eg: called `pvc.yaml` Then create PV claim with following command. - -```console -kubectl create -f pvc.yaml -``` - -## Consuming the Export - -Now we can consume the PV that we just created by creating an example web server app that uses the above `PersistentVolumeClaim` to claim the exported volume. -There are 2 pods that comprise this example: - -1. A web server pod that will read and display the contents of the NFS share -1. 
A writer pod that will write random data to the NFS share so the website will continually update - -Start both the busybox pod (writer) and the web server from the `cluster/examples/kubernetes/nfs` folder: - -```console -kubectl create -f busybox-rc.yaml -kubectl create -f web-rc.yaml -``` - -Let's confirm that the expected busybox writer pod and web server pod are **all** up and in the `Running` state: - -```console -kubectl get pod -l app=nfs-demo -``` - -In order to be able to reach the web server over the network, let's create a service for it: - -```console -kubectl create -f web-service.yaml -``` - -We can then use the busybox writer pod we launched before to check that nginx is serving the data appropriately. -In the below 1-liner command, we use `kubectl exec` to run a command in the busybox writer pod that uses `wget` to retrieve the web page that the web server pod is hosting. As the busybox writer pod continues to write a new timestamp, we should see the returned output also update every ~10 seconds or so. - -```console -$ echo; kubectl exec $(kubectl get pod -l app=nfs-demo,role=busybox -o jsonpath='{.items[0].metadata.name}') -- wget -qO- http://$(kubectl get services nfs-web -o jsonpath='{.spec.clusterIP}'); echo -``` - ->``` ->Thu Oct 22 19:28:55 UTC 2015 ->nfs-busybox-w3s4t ->``` - -## Teardown - -To clean up all resources associated with this walk-through, you can run the commands below. - -```console -kubectl delete -f web-service.yaml -kubectl delete -f web-rc.yaml -kubectl delete -f busybox-rc.yaml -kubectl delete -f pvc.yaml -kubectl delete -f pv.yaml -kubectl delete -f nfs.yaml -kubectl delete -f nfs-xfs.yaml -kubectl delete -f nfs-ceph.yaml -kubectl delete -f rbac.yaml -kubectl delete -f psp.yaml -kubectl delete -f scc.yaml # if deployed -kubectl delete -f operator.yaml -kubectl delete -f webhook.yaml # if deployed -kubectl delete -f crds.yaml -``` - -## Troubleshooting - -If the NFS server pod does not come up, the first step would be to examine the NFS operator's logs: - -```console -kubectl -n rook-nfs-system logs -l app=rook-nfs-operator -``` diff --git a/Documentation/quickstart.md b/Documentation/quickstart.md index c04e1d484..845573641 100644 --- a/Documentation/quickstart.md +++ b/Documentation/quickstart.md @@ -1,21 +1,206 @@ --- -title: Quickstart -weight: 200 +title: Cassandra Quickstart +weight: 2000 --- +{% include_relative branch.liquid %} -# Quickstart Guides +# Cassandra Quickstart -Welcome to Rook! We hope you have a great experience installing the Rook **cloud-native storage orchestrator** platform to enable highly available, durable storage -in your Kubernetes cluster. +Welcome to Rook! We hope you have a great experience installing the Rook **cloud-native storage orchestrator** platform to enable highly available, durable storage in your Kubernetes cluster. + +[Cassandra](http://cassandra.apache.org/) is a highly available, fault tolerant, peer-to-peer NoSQL database featuring lightning fast performance and tunable consistency. It provides massive scalability with no single point of failure. + +[Scylla](https://www.scylladb.com) is a close-to-the-hardware rewrite of Cassandra in C++. It features a shared nothing architecture that enables true linear scaling and major hardware optimizations that achieve ultra-low latencies and extreme throughput. It is a drop-in replacement for Cassandra and uses the same interfaces, so it is also supported by Rook. 
If you have any questions along the way, please don't hesitate to ask us in our [Slack channel](https://rook-io.slack.com). You can sign up for our Slack [here](https://slack.rook.io). -Rook provides a growing number of storage providers to a Kubernetes cluster, each with its own operator to deploy and manage the resources for the storage provider. +## Deploy Cassandra Operator + +First deploy the Rook Cassandra Operator using the following commands: + +```console +$ git clone --single-branch --branch v1.7.2 https://github.com/rook/cassandra.git +cd rook/cluster/examples/kubernetes/cassandra +kubectl apply -f crds.yaml +kubectl apply -f operator.yaml +``` + +This will install the operator in namespace rook-cassandra-system. You can check if the operator is up and running with: + +```console +kubectl -n rook-cassandra-system get pod +``` + +## Create and Initialize a Cassandra/Scylla Cluster + +Now that the operator is running, we can create an instance of a Cassandra/Scylla cluster by creating an instance of the `clusters.cassandra.rook.io` resource. +Some of that resource's values are configurable, so feel free to browse `cluster.yaml` and tweak the settings to your liking. +Full details for all the configuration options can be found in the [Cassandra Cluster CRD documentation](cassandra-cluster-crd.md). + +When you are ready to create a Cassandra cluster, simply run: + +```console +kubectl create -f cluster.yaml +``` + +We can verify that a Kubernetes object has been created that represents our new Cassandra cluster with the command below. +This is important because it shows that Rook has successfully extended Kubernetes to make Cassandra clusters a first class citizen in the Kubernetes cloud-native environment. + +```console +kubectl -n rook-cassandra get clusters.cassandra.rook.io +``` + +To check if all the desired members are running, you should see the same number of entries from the following command as the number of members that was specified in `cluster.yaml`: + +```console +kubectl -n rook-cassandra get pod -l app=rook-cassandra +``` + +You can also track the state of a Cassandra cluster from its status. To check the current status of a Cluster, run: + +```console +kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra +``` + +## Accessing the Database + +* From kubectl: + +To get a `cqlsh` shell in your new Cluster: + +```console +kubectl exec -n rook-cassandra -it rook-cassandra-east-1-east-1a-0 -- cqlsh +> DESCRIBE KEYSPACES; +``` + +* From inside a Pod: + +When you create a new Cluster, Rook automatically creates a Service for the clients to use in order to access the Cluster. The service's name follows the convention `-client`. You can see this Service in you cluster by running: + +```console +kubectl -n rook-cassandra describe service rook-cassandra-client +``` + +Pods running inside the Kubernetes cluster can use this Service to connect to Cassandra. +Here's an example using the [Python Driver](https://github.com/datastax/python-driver): + +```python +from cassandra.cluster import Cluster + +cluster = Cluster(['rook-cassandra-client.rook-cassandra.svc.cluster.local']) +session = cluster.connect() +``` + +## Scale Up + +The operator supports scale up of a rack as well as addition of new racks. To make the changes, you can use: + +```console +kubectl edit clusters.cassandra.rook.io rook-cassandra +``` + +* To scale up a rack, change the `Spec.Members` field of the rack to the desired value. +* To add a new rack, append the `racks` list with a new rack. 
Remember to choose a different rack name for the new rack. +* After editing and saving the yaml, check your cluster's Status and Events for information on what's happening: + +```console +kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra +``` + + +## Scale Down + +The operator supports scale down of a rack. To make the changes, you can use: + +```console +kubectl edit clusters.cassandra.rook.io rook-cassandra +``` + +* To scale down a rack, change the `Spec.Members` field of the rack to the desired value. +* After editing and saving the yaml, check your cluster's Status and Events for information on what's happening: + +```console +kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra +``` + +## Clean Up + +To clean up all resources associated with this walk-through, you can run the commands below. + +> **NOTE**: that this will destroy your database and delete all of its associated data. + +```console +kubectl delete -f cluster.yaml +kubectl delete -f operator.yaml +kubectl delete -f crds.yaml +``` + +## Troubleshooting + +If the cluster does not come up, the first step would be to examine the operator's logs: + +```console +kubectl -n rook-cassandra-system logs -l app=rook-cassandra-operator +``` + +If everything looks OK in the operator logs, you can also look in the logs for one of the Cassandra instances: + +```console +kubectl -n rook-cassandra logs rook-cassandra-0 +``` + +## Cassandra Monitoring + +To enable jmx_exporter for cassandra rack, you should specify `jmxExporterConfigMapName` option for rack in CassandraCluster CRD. + +For example: +```yaml +apiVersion: cassandra.rook.io/v1alpha1 +kind: Cluster +metadata: + name: my-cassandra + namespace: rook-cassandra +spec: + ... + datacenter: + name: my-datacenter + racks: + - name: my-rack + members: 3 + jmxExporterConfigMapName: jmx-exporter-settings + storage: + volumeClaimTemplates: + - metadata: + name: rook-cassandra-data + spec: + storageClassName: my-storage-class + resources: + requests: + storage: 200Gi +``` + +Simple config map example to get all metrics: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: jmx-exporter-settings + namespace: rook-cassandra +data: + jmx_exporter_config.yaml: | + lowercaseOutputLabelNames: true + lowercaseOutputName: true + whitelistObjectNames: ["org.apache.cassandra.metrics:*"] +``` + +ConfigMap's data field must contain `jmx_exporter_config.yaml` key with jmx exporter settings. -**Follow these guides to get started with each provider**: +There is no automatic reloading mechanism for pods when the config map updated. +After the configmap changed, you should restart all rack pods manually: -| Storage Provider | Status | Description | -| -------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [Ceph](ceph-quickstart.md) | Stable / V1 | Ceph is a highly scalable distributed storage solution for block storage, object storage, and shared filesystems with years of production deployments. | -| [Cassandra](cassandra.md) | Alpha | Cassandra is a highly available NoSQL database featuring lightning fast performance, tunable consistency and massive scalability. | -| [NFS](nfs.md) | Alpha | NFS allows remote hosts to mount filesystems over a network and interact with those filesystems as though they are mounted locally. 
| +```bash +NAMESPACE= +CLUSTER= +RACKS=$(kubectl get sts -n ${NAMESPACE} -l "cassandra.rook.io/cluster=${CLUSTER}") +echo ${RACKS} | xargs -n1 kubectl rollout restart -n ${NAMESPACE} +``` diff --git a/Documentation/storage-providers.md b/Documentation/storage-providers.md deleted file mode 100644 index 67a5a7ada..000000000 --- a/Documentation/storage-providers.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Storage Providers -weight: 12050 -indent: true ---- - -# Storage Providers - -Rook is the home for operators for multiple storage providers. Each of these storage providers -has specific requirements and each of them is very independent. There is no runtime dependency -between the storage providers. Development is where the storage providers benefit from one another. - -Rook provides a development framework with a goal of enabling storage providers to create -operators for Kubernetes to manage their storage layer. As the storage provider community -grows, we expect this framework to grow as common storage constructs are identified -that will benefit the community. Rook does not aim to replace other frameworks or -communities, but to fill gaps not provided by other core projects. - -Storage providers in Rook are currently built on the [Controller Runtime](https://github.com/kubernetes-sigs/controller-runtime), -but may also be built on other frameworks such as the [Operator SDK](https://sdk.operatorframework.io/) -or [Kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). The choice of the -underlying framework is up to the storage provider. - -Rook does not aim to be a general framework for storage, but to provide a -very specific set of helpers to meet the storage provider needs in the Rook project. - -## Rook Framework - -Rook provides the following framework to assist storage providers in building an operator: - -* Common golang packages shared by storage providers are in the main [Rook repo](https://github.com/rook/rook). -* Common build scripts for building the operator images are in the main - [Rook repo](https://github.com/rook/rook/tree/master/build). -* Each provider has its own repo under the [Rook org](https://github.com/rook). - * Multiple community members are given push access to the repo, including - owners of the storage provider, Rook steering committee members, - and other Rook maintainers if deemed helpful or necessary by the steering - committee. Maintainers for the new provider are added according to the - [governance](https://github.com/rook/rook/blob/master/GOVERNANCE.md). - * Providers added to Rook prior to 2020 are grandfathered into the main - [Rook repo](https://github.com/rook/rook). -* Storage providers must follow the Rook [governance](https://github.com/rook/rook/blob/master/GOVERNANCE.md) - in the interest of the good of the overall project. Storage providers have - autonomy in their feature work, while collaboration with the community - is expected for shared features. -* A quarterly release cadence is in place for the operators in the main Rook repo. - Operators in their own repo define their own cadence and versioning scheme as desired. - * Storage providers own their release process, while following Rook best practices to - ensure high quality. - * Each provider owns independent CI based on Github actions, with patterns and build - automation that can be re-used by providers -* Docker images are pushed to the [Rook DockerHub](https://hub.docker.com/u/rook) where - each storage provider has its own repo. 
-* Helm charts are published to [charts.rook.io](https://charts.rook.io/release) -* Documentation for the storage provider is to be written by the storage provider - members. The build process will publish the documentation to the [Rook website](https://rook.github.io/docs/rook/master/). -* All storage providers are added to the [Rook.io website](https://rook.io/) -* A great Slack community is available where you can communicate amongst developers and users - -## Considering Joining Rook? - -If you own a storage provider and are interested in joining the Rook project to create -an operator, please consider the following: - -* You are making a clear commitment to the development of the storage provider. - Creating an operator is not a one-time engineering cost, but is a long term commitment - to the community. -* Support for a storage provider in Rook requires dedication and community support. -* Do you really need an operator? Many storage applications (e.g. CSI drivers) - can be deployed with tools such as a Helm chart and don't really need the - flexibility of an operator. -* Joining Rook is also about community, not just the framework. - -## Engineering Requirements - -The engineering costs of each storage provider include: - -* Develop the operator -* Rook maintainers will help answer questions along the way, but ultimately - you own the development -* If there are test failures in the CI, they should be investigated in a timely manner -* If issues are opened in Github, they need investigation and triage to provide - expectations about the priority and timeline -* If users have questions in Slack, they should be answered in a timely manner. - Community members can also be redirected to other locations if desired for the provider. -* A regular cadence of releases is expected. Software always needs to evolve with new versions - of K8s, accommodate new features in the storage provider, etc. -* Each provider maintains a ROADMAP.md in the root of their repo, updates it regularly - (e.g. quarterly or with the release cadence), and provides input to the overall Rook - [roadmap](https://github.com/rook/rook/blob/master/ROADMAP.md) for common features. - -### Inactive Providers - -If a storage provider does not have engineering resources, Rook cannot claim to support it. -After some months of inactivity Rook will deprecate a storage provider. The timing -will be decided on a case by case basis by the steering committee. The repo and other artifacts -for deprecated storage providers will be left intact for reference. diff --git a/Documentation/tectonic.md b/Documentation/tectonic.md deleted file mode 100644 index 23ec6440d..000000000 --- a/Documentation/tectonic.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Tectonic Configuration -weight: 11800 -indent: true ---- - -# Tectonic Configuration - -Here is a running guide on how to implement Rook on Tectonic. A complete guide on how to install Tectonic is out of the scope of the Rook project. More info can be found on the [Tectonic website](https://coreos.com/tectonic/docs/latest/) - -## Prerequisites - -* An installed tectonic-installer. These steps are described on [the Tectonic website](https://coreos.com/tectonic/docs/latest/install/bare-metal/#4-tectonic-installer) -* A running matchbox node which will do the provisioning (Matchbox is only required if you are running Tectonic on Bare metal) -* You can run through all steps of the GUI installer, but in the last step, choose `Boot manually`. This way we can make the necessary changes first. 
- -## Edit the kubelet.service file -We need to make a few adaptions to the Kubelet systemd service file generated by the Tectonic-installer. - -First change to the directory in which you untarred the tectonic installer and find your newly generated cluster configuration files. - -```console -cd ~/tectonic/tectonic-installer/LINUX-OR-DARWIN/clusters -``` - - -Open the file `modules/ignition/resources/services/kubelet.service` in your favorite editor and after the last line containing `ExecStartPre=...`, paste the following extra lines: - -```console -ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins -ExecStartPre=/bin/mkdir -p /var/lib/rook -``` - -And after the `ExecStart=/usr/lib/coreos/kubelet-wrapper \` line, insert the following flag for the kubelet-wrapper to point to a path reachable outside of the Kubelet rkt container: - -```console ---volume-plugin-dir=/var/lib/kubelet/volumeplugins \ -``` - -Save and close the file. - -### Boot your Tectonic cluster - -All the preparations are ready for Tectonic to boot now. We will use `terraform` to start the cluster. -Visit the official [Tectonic manual boot](https://coreos.com/tectonic/docs/latest/install/aws/manual-boot.html#deploy-the-cluster) page for the commands to use. - -**Remark:** The Tectonic installer contains the correct terraform binary out of the box. This terraform binary can be found in following directory `~/tectonic/tectonic-installer/linux`. - -## Start Rook - -After the Tectonic Installer ran and the Kubernetes cluster is started and ready, you can follow the [Rook installation guide](ceph-quickstart.md). -If you want to specify which disks Rook uses, follow the instructions in [creating Rook clusters](ceph-cluster-crd.md) diff --git a/INSTALL.md b/INSTALL.md index 9bed0b8b2..b9d703d48 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -22,21 +22,13 @@ The following tools are need on the host: ## Build -You can build the Rook binaries and all container images for the host platform by simply running the +You can build the Rook binaries and all container image for the host platform by simply running the command below. Building in parallel with the `-j` option is recommended. ```console make -j4 ``` -Developers may often wish to make only images for a particular backend in their testing. This can -be done by specifying the `IMAGES` environment variable with `make` as exemplified below. Possible -values for are as defined by sub-directory names in the `/rook/images/` dir. Multiple images can be separated by a space. - -```console -make -j4 IMAGES='ceph' build -``` - Run `make help` for more options. ## CI Workflow @@ -46,8 +38,6 @@ On every commit to PR and master the CI will build, run unit tests, and run inte If the build is for master or a release, the build will also be published to [dockerhub.com](https://cloud.docker.com/u/rook/repository/list). -> Note that if the pull request title follows Rook's [contribution guidelines](https://rook.io/docs/rook/master/development-flow.html#commit-structure), the CI will automatically run the appropriate test scenario. For example if a pull request title is "ceph: add a feature", then the tests for the Ceph storage provider will run. Similarly, tests will only run for a single provider with the "cassandra:" and "nfs:" prefixes. 
- ## Building for other platforms You can also run the build for all supported platforms: diff --git a/Makefile b/Makefile index 676d53ea7..2952978d3 100644 --- a/Makefile +++ b/Makefile @@ -62,13 +62,13 @@ SERVER_PLATFORMS := $(filter linux_%,$(PLATFORMS)) CLIENT_PLATFORMS := $(filter-out linux_%,$(PLATFORMS)) # server projects that we build on server platforms -SERVER_PACKAGES = $(GO_PROJECT)/cmd/rook $(GO_PROJECT)/cmd/rookflex +SERVER_PACKAGES = $(GO_PROJECT)/cmd/rook # tests packages that will be compiled into binaries TEST_PACKAGES = $(GO_PROJECT)/tests/integration # the root go project -GO_PROJECT=github.com/rook/rook +GO_PROJECT=github.com/rook/cassandra # inject the version number into the golang version package using the -X linker flag LDFLAGS += -X $(GO_PROJECT)/pkg/version.Version=$(VERSION) @@ -96,9 +96,6 @@ GO_TEST_FILTER=$(TESTFILTER) include build/makelib/golang.mk -# setup helm charts -include build/makelib/helm.mk - # ==================================================================================== # Targets @@ -106,7 +103,7 @@ build.version: @mkdir -p $(OUTPUT_DIR) @echo "$(VERSION)" > $(OUTPUT_DIR)/version -build.common: build.version helm.build mod.check +build.common: build.version mod.check @$(MAKE) go.init @$(MAKE) go.validate @@ -115,7 +112,7 @@ do.build.platform.%: do.build.parallel: $(foreach p,$(PLATFORMS), do.build.platform.$(p)) -build: csv-clean build.common ## Only build for linux platform +build: build.common ## Only build for linux platform @$(MAKE) go.build PLATFORM=linux_$(GOHOSTARCH) @$(MAKE) -C images PLATFORM=linux_$(GOHOSTARCH) @@ -153,7 +150,7 @@ codegen: ${CODE_GENERATOR} ## Run code generators. mod.check: go.mod.check ## Check if any go modules changed. mod.update: go.mod.update ## Update all go modules. -clean: csv-clean ## Remove all files that are created by building. +clean: ## Remove all files that are created by building. @$(MAKE) go.mod.clean @$(MAKE) -C images clean @rm -fr $(OUTPUT_DIR) $(WORK_DIR) @@ -164,15 +161,6 @@ distclean: clean ## Remove all files that are created by building or configuring prune: ## Prune cached artifacts. @$(MAKE) -C images prune -# Change how CRDs are generated for CSVs -csv-ceph: export MAX_DESC_LEN=0 # sets the description length to 0 since CSV cannot be bigger than 1MB -csv-ceph: export NO_OB_OBC_VOL_GEN=true -csv-ceph: csv-clean crds ## Generate a CSV file for OLM. - $(MAKE) -C images/ceph csv - -csv-clean: ## Remove existing OLM files. - @$(MAKE) -C images/ceph csv-clean - crds: $(CONTROLLER_GEN) $(YQ) @echo Updating CRD manifests @build/crds/build-crds.sh $(CONTROLLER_GEN) $(YQ) diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md index ec2aabaeb..759b04c68 100644 --- a/PendingReleaseNotes.md +++ b/PendingReleaseNotes.md @@ -1,6 +1,6 @@ # Major Themes -v1.7... +v1.8... ## K8s Version Support @@ -8,49 +8,11 @@ v1.7... ## Breaking Changes -### Ceph - -- The Operator configuration option `ROOK_ALLOW_MULTIPLE_FILESYSTEMS` has been removed in favor of simply verifying the Ceph version is at least Pacific. -Multiple filesystems are stable since Ceph Pacific. -So users who had `ROOK_ALLOW_MULTIPLE_FILESYSTEMS` enabled will need to update their Ceph version to Pacific. +Rook-Cassandra v1.8 is separated from the main Rook project and can be released independently. This +is currently the last planned release of Rook-Cassandra. ## Features ### Core -### Ceph - -- Official Ceph images have moved from docker.io to quay.io. Users running tags like `v14.2`, `v15.2`, `v16.2` must change the registry URL. 
-So the CephCLuster spec field `image` must be updated to point to quay, like `image: quay.io/ceph/ceph:v16.2`. -- Add user data protection when deleting Rook-Ceph Custom Resources - - A CephCluster will not be deleted if there are any other Rook-Ceph Custom resources referencing - it with the assumption that they are using the underlying Ceph cluster. - - A CephObjectStore will not be deleted if there is a bucket present. In addition to protection - from deletion when users have data in the store, this implicitly protects these resources from - being deleted when there is a referencing ObjectBucketClaim present. - - See [the design](https://github.com/rook/rook/blob/master/design/ceph/resource-dependencies.md) - for detailed information. -- Add support for creating Hybrid Storage Pools - - Hybrid storage pool helps to create hybrid crush rule for choosing primary OSD for high performance - devices (ssd, nvme, etc) and remaining OSD for low performance devices (hdd). - - See [the design](Documentation/ceph-pool-crd.md#hybrid-storage-pools) for more details. - - Checkout the [ceph docs](https://docs.ceph.com/en/latest/rados/operations/crush-map/#custom-crush-rules) - for detailed information. -- Add support cephfs mirroring peer configuration, refer to the [configuration](Documentation/ceph-filesystem-crd.md#mirroring) for more details -- Add support for Kubernetes TLS secret for referring TLS certs needed for ceph RGW server. -- Stretch clusters are considered stable - - Ceph v16.2.5 or greater is required for stretch clusters -- The use of peer secret names in CephRBDMirror is deprecated. Please use CephBlockPool CR to configure peer secret names and import peers. Checkout the `mirroring` section in the CephBlockPool [spec](Documentation/ceph-pool-crd.md#spec) for more details. 
-- Update Ceph CSI to `v3.4.0` for more details read the [official release note](https://github.com/ceph/ceph-csi/releases/tag/v3.4.0) - ### Cassandra - -- CRDs converted from v1beta1 to v1 - - Schema is generated from the internal types for more complete validation - - Minimum K8s version for the v1 CRDs is K8s 1.16 - -### NFS - -- CRDs converted from v1beta1 to v1 - - Schema is generated from the internal types for more complete validation - - Minimum K8s version for the v1 CRDs is K8s 1.16 diff --git a/README.md b/README.md index 17a3e613f..aa67eba93 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,9 @@ Rook [![CNCF Status](https://img.shields.io/badge/cncf%20status-graduated-blue.svg)](https://www.cncf.io/projects) -[![GitHub release](https://img.shields.io/github/release/rook/rook/all.svg)](https://github.com/rook/rook/releases) -[![Docker Pulls](https://img.shields.io/docker/pulls/rook/ceph)](https://hub.docker.com/u/rook) -[![Go Report Card](https://goreportcard.com/badge/github.com/rook/rook)](https://goreportcard.com/report/github.com/rook/rook) -[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1599/badge)](https://bestpractices.coreinfrastructure.org/projects/1599) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Frook%2Frook.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Frook%2Frook?ref=badge_shield) +[![Docker Pulls](https://img.shields.io/docker/pulls/rook/cassandra)](https://hub.docker.com/u/rook) +[![Go Report Card](https://goreportcard.com/badge/github.com/rook/cassandra)](https://goreportcard.com/report/github.com/rook/cassandra) [![Slack](https://slack.rook.io/badge.svg)](https://slack.rook.io) -[![Twitter Follow](https://img.shields.io/twitter/follow/rook_io.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=rook_io&user_id=788180534543339520) # What is Rook? @@ -24,7 +20,7 @@ Rook is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF ## Getting Started and Documentation -For installation, deployment, and administration, see our [Documentation](https://rook.github.io/docs/rook/master). +For installation, deployment, and administration of the Cassandra storage provider, see our [Documentation](https://rook.io/docs/cassandra/master). ## Contributing @@ -32,7 +28,7 @@ We welcome contributions. See [Contributing](CONTRIBUTING.md) to get started. ## Report a Bug -For filing bugs, suggesting improvements, or requesting new features, please open an [issue](https://github.com/rook/rook/issues). +For filing bugs, suggesting improvements, or requesting new features, please open an [issue](https://github.com/rook/cassandra/issues). ### Reporting Security Vulnerabilities @@ -68,34 +64,20 @@ Anyone who wants to discuss the direction of the project, design and implementat ## Project Status -The status of each storage provider supported by Rook can be found in the table below. -Each API group is assigned its own individual status to reflect their varying maturity and stability. -More details about API versioning and status in Kubernetes can be found on the Kubernetes [API versioning page](https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-versioning), but the key difference between the statuses are summarized below: +The status of each storage provider supported by Rook can be found in the [main Rook repo](https://github.com/rook/rook#project-status). 
-- **Alpha:** The API may change in incompatible ways in a later software release without notice, recommended for use only in short-lived testing clusters, due to increased risk of bugs and lack of long-term support. -- **Beta:** Support for the overall features will not be dropped, though details may change. Support for upgrading or migrating between versions will be provided, either through automation or manual steps. -- **Stable:** Features will appear in released software for many subsequent versions and support for upgrading between versions will be provided with software automation in the vast majority of scenarios. - -| Name | Details | API Group | Status | -| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | ------------------------------------------------------------------------------ | -| Ceph | [Ceph](https://ceph.com/) is a distributed storage system that provides file, block and object storage and is deployed in large scale production clusters. | ceph.rook.io/v1 | Stable | -| Cassandra | [Cassandra](http://cassandra.apache.org/) is a highly available NoSQL database featuring lightning fast performance, tunable consistency and massive scalability. [Scylla](https://www.scylladb.com) is a close-to-the-hardware rewrite of Cassandra in C++, which enables much lower latencies and higher throughput. | cassandra.rook.io/v1alpha1 | Alpha | -| NFS | [Network File System (NFS)](https://github.com/nfs-ganesha/nfs-ganesha/wiki) allows remote hosts to mount file systems over a network and interact with those file systems as though they are mounted locally. | nfs.rook.io/v1alpha1 | Alpha | -| CockroachDB | [CockroachDB](https://www.cockroachlabs.com/product/cockroachdb/) is a cloud-native SQL database for building global, scalable cloud services that survive disasters. | cockroachdb.rook.io/v1alpha1 | [Deprecated](https://github.com/rook/rook/issues/6990)* | -| EdgeFS | [EdgeFS](http://edgefs.io) is high-performance and fault-tolerant decentralized data fabric with access to object, file, NoSQL and block. | edgefs.rook.io/v1 | [Deprecated](https://github.com/rook/rook/issues/5823#issuecomment-703834989)* | -| YugabyteDB | [YugabyteDB](https://docs.yugabyte.com/latest/introduction/) is a high-performance, cloud-native distributed SQL database which can tolerate disk, node, zone and region failures automatically. | yugabytedb.rook.io/v1alpha1 | [Deprecated](https://github.com/rook/rook/issues/6992#issuecomment-771297708)* | - -\* CockroachDB, EdgeFS, and YugabyteDB were removed from Rook in v1.6. See [Rook v1.5](https://rook.github.io/docs/rook/v1.5/) docs if still interested. - For YugabyteDB, see the replacement operator [here](https://github.com/yugabyte/yugabyte-operator). 
+| Name | Details | API Group | Status | +| --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | ------ | +| Cassandra | [Cassandra](http://cassandra.apache.org/) is a highly available NoSQL database featuring lightning fast performance, tunable consistency and massive scalability. [Scylla](https://www.scylladb.com) is a close-to-the-hardware rewrite of Cassandra in C++, which enables much lower latencies and higher throughput. | cassandra.rook.io/v1alpha1 | Alpha | ### Official Releases -Official releases of Rook can be found on the [releases page](https://github.com/rook/rook/releases). -Please note that it is **strongly recommended** that you use [official releases](https://github.com/rook/rook/releases) of Rook, as unreleased versions from the master branch are subject to changes and incompatibilities that will not be supported in the official releases. +Official releases of the Cassandra operator can be found on the [releases page](https://github.com/rook/cassandra/releases). +Please note that it is **strongly recommended** that you use [official releases](https://github.com/rook/cassandra/releases) of Rook, as unreleased versions from the master branch are subject to changes and incompatibilities that will not be supported in the official releases. Builds from the master branch can have functionality changed and even removed at any time without compatibility support and without prior notice. +Releases of the Cassandra operator prior to v1.7 are found in the main [Rook repo](https://github.com/rook/rook/releases). + ## Licensing Rook is under the Apache 2.0 license. - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Frook%2Frook.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Frook%2Frook?ref=badge_large) diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index 8405d3b1a..000000000 --- a/ROADMAP.md +++ /dev/null @@ -1,50 +0,0 @@ -# Roadmap - -This document defines a high level roadmap for Rook development and upcoming releases. -The features and themes included in each milestone are optimistic in the sense that some do not have clear owners yet. -Community and contributor involvement is vital for successfully implementing all desired items for each release. -We hope that the items listed below will inspire further engagement from the community to keep Rook progressing and shipping exciting and valuable features. - -Any dates listed below and the specific issues that will ship in a given milestone are subject to change but should give a general idea of what we are planning. -See the [Github project boards](https://github.com/rook/rook/projects) for the most up-to-date issues and their status. - - -## Rook 1.7 - -The following high level features are targeted for Rook v1.7 (July 2021). For more detailed project tracking see the [v1.7 board](https://github.com/rook/rook/projects/21). 
- -* Ceph - * Helm chart for the cluster CR [#2109](https://github.com/rook/rook/issues/2109) - * Configure bucket notifications with a CRD ([design doc](https://github.com/rook/rook/blob/master/design/ceph/object/ceph-bucket-notification-crd.md)) - * Add alpha support for COSI (Container object storage interface) with K8s 1.22 [#7843](https://github.com/rook/rook/issues/7843) - * Disaster Recovery (DR): CSI solution for application failover in the event of cluster failure - * Allow OSDs on PVCs to automatically grow when the cluster is nearly full [#6101](https://github.com/rook/rook/issues/6101) - * OSD encryption key rotation [#7925](https://github.com/rook/rook/issues/7925) - * iSCSI gateway deployment [#4334](https://github.com/rook/rook/issues/4334) - * Use go-ceph to interact with object store instead of `radosgw-admin` [#7924](https://github.com/rook/rook/issues/7924) - * RGW Multi-site replication improvements towards declaring the feature stable [#6401](https://github.com/rook/rook/issues/6401) - * More complete solution for protecting against accidental cluster deletion [#7885](https://github.com/rook/rook/pull/7885) - * Remove support for Nautilus, focusing on support for Octopus and Pacific [#7908](https://github.com/rook/rook/issues/7908) - * Build hygiene - * Complete conversion from Jenkins pipeline to GitHub actions - -## Themes - -The general areas for improvements include the following, though may not be committed to a release. - -* Admission Controllers - * Improve custom resource validation for each storage provider -* Controller Runtime - * Update [remaining Rook controllers](https://github.com/rook/rook/issues?q=is%3Aissue+is%3Aopen+%22controller+runtime%22+label%3Areliability+) to build on the controller runtime -* Ceph - * Enable the admission controller by default [#6242](https://github.com/rook/rook/issues/6242) - * Dashboard-driven configuration after minimal CR install - * Simplify metadata backup and disaster recovery - * CSI Driver improvements tracked in the [CSI repo](https://github.com/ceph/ceph-csi) - * Support for Windows nodes -* Cassandra - * Handle loss of persistent local data [#2533](https://github.com/rook/rook/issues/2533) - * Enable automated repairs [#2531](https://github.com/rook/rook/issues/2531) - * Graduate CRDs to beta -* NFS - * Graduate CRDs to beta diff --git a/build/codegen/codegen.sh b/build/codegen/codegen.sh index 71deb8351..dcc7d35bd 100755 --- a/build/codegen/codegen.sh +++ b/build/codegen/codegen.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -GROUP_VERSIONS="rook.io:v1alpha2 ceph.rook.io:v1 nfs.rook.io:v1alpha1 cassandra.rook.io:v1alpha1" +GROUP_VERSIONS="rook.io:v1alpha2 cassandra.rook.io:v1alpha1" scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" @@ -25,8 +25,8 @@ scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # run code deepcopy generation bash ${CODE_GENERATOR}/generate-groups.sh \ deepcopy \ - github.com/rook/rook/pkg/client \ - github.com/rook/rook/pkg/apis \ + github.com/rook/cassandra/pkg/client \ + github.com/rook/cassandra/pkg/apis \ "${GROUP_VERSIONS}" \ --output-base "$(dirname "${BASH_SOURCE[0]}")/../../../../.." 
\ --go-header-file "${scriptdir}/boilerplate.go.txt" @@ -34,9 +34,8 @@ bash ${CODE_GENERATOR}/generate-groups.sh \ # run code client,lister,informer generation bash ${CODE_GENERATOR}/generate-groups.sh \ client,lister,informer \ - github.com/rook/rook/pkg/client \ - github.com/rook/rook/pkg/apis \ + github.com/rook/cassandra/pkg/client \ + github.com/rook/cassandra/pkg/apis \ "${GROUP_VERSIONS}" \ --output-base "$(dirname "${BASH_SOURCE[0]}")/../../../../.." \ - --go-header-file "${scriptdir}/boilerplate.go.txt" \ - --plural-exceptions "CephNFS:CephNFSes" \ + --go-header-file "${scriptdir}/boilerplate.go.txt" diff --git a/build/common.sh b/build/common.sh index 3e9069f32..7b2e14b3a 100644 --- a/build/common.sh +++ b/build/common.sh @@ -1,5 +1,5 @@ #!/bin/bash -e - +set -u # Copyright 2016 The Rook Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,34 +15,44 @@ # limitations under the License. BUILD_HOST=$(hostname) -BUILD_REPO=github.com/rook/rook +BUILD_REPO=github.com/rook/cassandra BUILD_ROOT=$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd -P) SHA256CMD=${SHA256CMD:-shasum -a 256} -BUILD_REGISTRY=build-$(echo ${BUILD_HOST}-${BUILD_ROOT} | ${SHA256CMD} | cut -c1-8) +BUILD_REGISTRY=build-$(echo "${BUILD_HOST}"-"${BUILD_ROOT}" | ${SHA256CMD} | cut -c1-8) DOCKERCMD=${DOCKERCMD:-docker} -OUTPUT_DIR=${BUILD_ROOT}/_output -WORK_DIR=${BUILD_ROOT}/.work +export scriptdir +scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export OUTPUT_DIR=${BUILD_ROOT}/_output +export WORK_DIR=${BUILD_ROOT}/.work CACHE_DIR=${BUILD_ROOT}/.cache -KUBEADM_DIND_DIR=${CACHE_DIR}/kubeadm-dind +export KUBEADM_DIND_DIR=${CACHE_DIR}/kubeadm-dind CROSS_IMAGE=${BUILD_REGISTRY}/cross-amd64 CROSS_IMAGE_VOLUME=cross-volume CROSS_RSYNC_PORT=10873 function ver() { - printf "%d%03d%03d%03d" $(echo "$1" | tr '.' ' ') + local full_ver maj min bug build + full_ver="$1" # functions should name input params for easier understanding + maj="$(echo "${full_ver}" | cut -f1 -d'.')" # when splitting a param, name the components for easier understanding + min="$(echo "${full_ver}" | cut -f2 -d'.')" + bug="$(echo "${full_ver}" | cut -f3 -d'.')" + build="$(echo "${full_ver}" | cut -f4 -d'.')" + printf "%d%03d%03d%03d" "${maj}" "${min}" "${bug}" "${build}" } function check_git() { # git version 2.6.6+ through 2.8.3 had a bug with submodules. this makes it hard # to share a cloned directory between host and container # see https://github.com/git/git/blob/master/Documentation/RelNotes/2.8.3.txt#L33 - local gitversion=$(git --version | cut -d" " -f3) - if (( $(ver ${gitversion}) > $(ver 2.6.6) && $(ver ${gitversion}) < $(ver 2.8.3) )); then - echo WARN: your running git version ${gitversion} which has a bug related to relative + local gitversion + gitversion=$(git --version | cut -d" " -f3) + + if (( $(ver "${gitversion}") > $(ver 2.6.6) && $(ver "${gitversion}") < $(ver 2.8.3) )); then + echo WARN: you are running git version "${gitversion}" which has a bug related to relative echo WARN: submodule paths. 
Please consider upgrading to 2.8.3 or later fi } @@ -56,18 +66,18 @@ function start_rsync_container() { -p ${CROSS_RSYNC_PORT}:873 \ -v ${CROSS_IMAGE_VOLUME}:/volume \ --entrypoint "/tini" \ - ${CROSS_IMAGE} \ + "${CROSS_IMAGE}" \ -- /build/rsyncd.sh } function wait_for_rsync() { # wait for rsync to come up local tries=100 - while (( ${tries} > 0 )) ; do + while (( tries > 0 )); do if rsync "rsync://localhost:${CROSS_RSYNC_PORT}/" &> /dev/null ; then return 0 fi - tries=$(( ${tries} - 1 )) + (( tries-- )) sleep 0.1 done echo ERROR: rsyncd did not come up >&2 @@ -77,8 +87,8 @@ function wait_for_rsync() { function stop_rsync_container() { local id=$1 - ${DOCKERCMD} stop ${id} &> /dev/null || true - ${DOCKERCMD} rm ${id} &> /dev/null || true + ${DOCKERCMD} stop "${id}" &> /dev/null || true + ${DOCKERCMD} rm "${id}" &> /dev/null || true } function run_rsync() { @@ -90,10 +100,11 @@ function run_rsync() { # run the container as an rsyncd daemon so that we can copy the # source tree to the container volume. - local id=$(start_rsync_container) + local id + id=$(start_rsync_container) # wait for rsync to come up - wait_for_rsync || stop_rsync_container ${id} + wait_for_rsync || { stop_rsync_container "${id}"; return 1; } # NOTE: add --progress to show files being syncd rsync \ @@ -101,15 +112,15 @@ function run_rsync() { --delete \ --prune-empty-dirs \ "$@" \ - $src $dst || { stop_rsync_container ${id}; return 1; } + "$src" "$dst" || { stop_rsync_container "${id}"; return 1; } - stop_rsync_container ${id} + stop_rsync_container "${id}" } function rsync_host_to_container() { - run_rsync ${scriptdir}/.. rsync://localhost:${CROSS_RSYNC_PORT}/volume/go/src/${BUILD_REPO} "$@" + run_rsync "${scriptdir}"/.. rsync://localhost:${CROSS_RSYNC_PORT}/volume/go/src/${BUILD_REPO} "$@" } function rsync_container_to_host() { - run_rsync rsync://localhost:${CROSS_RSYNC_PORT}/volume/go/src/${BUILD_REPO}/ ${scriptdir}/.. "$@" + run_rsync rsync://localhost:${CROSS_RSYNC_PORT}/volume/go/src/${BUILD_REPO}/ "${scriptdir}"/.. 
"$@" } diff --git a/build/crds/build-crds.sh b/build/crds/build-crds.sh index 2c3a2224e..bf06d5d57 100755 --- a/build/crds/build-crds.sh +++ b/build/crds/build-crds.sh @@ -32,108 +32,21 @@ if [[ -n "$BUILD_CRDS_INTO_DIR" ]]; then echo "Generating CRDs into dir $BUILD_CRDS_INTO_DIR" DESTINATION_ROOT="$BUILD_CRDS_INTO_DIR" fi -OLM_CATALOG_DIR="${DESTINATION_ROOT}/cluster/olm/ceph/deploy/crds" -CEPH_CRDS_FILE_PATH="${DESTINATION_ROOT}/cluster/examples/kubernetes/ceph/crds.yaml" -CEPH_HELM_CRDS_FILE_PATH="${DESTINATION_ROOT}/cluster/charts/rook-ceph/templates/resources.yaml" CASSANDRA_CRDS_DIR="${DESTINATION_ROOT}/cluster/examples/kubernetes/cassandra" -NFS_CRDS_DIR="${DESTINATION_ROOT}/cluster/examples/kubernetes/nfs" ############# # FUNCTIONS # ############# -copy_ob_obc_crds() { - mkdir -p "$OLM_CATALOG_DIR" - cp -f "${SCRIPT_ROOT}/cluster/olm/ceph/assemble/objectbucket.io_objectbucketclaims.yaml" "$OLM_CATALOG_DIR" - cp -f "${SCRIPT_ROOT}/cluster/olm/ceph/assemble/objectbucket.io_objectbuckets.yaml" "$OLM_CATALOG_DIR" -} - generating_crds_v1() { - echo "Generating ceph crds" - "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./pkg/apis/ceph.rook.io/v1" output:crd:artifacts:config="$OLM_CATALOG_DIR" - # the csv upgrade is failing on the volumeClaimTemplate.metadata.annotations.crushDeviceClass unless we preserve the annotations as an unknown field - $YQ_BIN_PATH w -i "${OLM_CATALOG_DIR}"/ceph.rook.io_cephclusters.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.storage.properties.storageClassDeviceSets.items.properties.volumeClaimTemplates.items.properties.metadata.properties.annotations.x-kubernetes-preserve-unknown-fields true - echo "Generating cassandra crds" "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./pkg/apis/cassandra.rook.io/v1alpha1" output:crd:artifacts:config="$CASSANDRA_CRDS_DIR" # Format with yq for consistent whitespace $YQ_BIN_PATH read $CASSANDRA_CRDS_DIR/cassandra.rook.io_clusters.yaml > $CASSANDRA_CRDS_DIR/crds.yaml rm -f $CASSANDRA_CRDS_DIR/cassandra.rook.io_clusters.yaml - - echo "Generating nfs crds" - "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./pkg/apis/nfs.rook.io/v1alpha1" output:crd:artifacts:config="$NFS_CRDS_DIR" - # Format with yq for consistent whitespace - $YQ_BIN_PATH read $NFS_CRDS_DIR/nfs.rook.io_nfsservers.yaml > $NFS_CRDS_DIR/crds.yaml - rm -f $NFS_CRDS_DIR/nfs.rook.io_nfsservers.yaml -} - -generating_crds_v1alpha2() { - "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./pkg/apis/rook.io/v1alpha2" output:crd:artifacts:config="$OLM_CATALOG_DIR" - # TODO: revisit later - # * remove copy_ob_obc_crds() - # * remove files cluster/olm/ceph/assemble/{objectbucket.io_objectbucketclaims.yaml,objectbucket.io_objectbuckets.yaml} - # Activate code below - # "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./vendor/github.com/kube-object-storage/lib-bucket-provisioner/pkg/apis/objectbucket.io/v1alpha1" output:crd:artifacts:config="$OLM_CATALOG_DIR" -} - -generate_vol_rep_crds() { - echo "Generating volume replication crds in crds.yaml" - "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="github.com/csi-addons/volume-replication-operator/api/v1alpha1" output:crd:artifacts:config="$OLM_CATALOG_DIR" -} - -generating_main_crd() { - true > "$CEPH_CRDS_FILE_PATH" - true > "$CEPH_HELM_CRDS_FILE_PATH" -cat < "$CEPH_CRDS_FILE_PATH" -############################################################################## -# Create the CRDs that are necessary before creating your Rook cluster. 
-# These resources *must* be created before the cluster.yaml or their variants. -############################################################################## -EOF -} - -build_helm_resources() { - echo "Generating helm resources.yaml" - { - # add header - echo "{{- if .Values.crds.enabled }}" - echo "{{- if semverCompare \">=1.16.0-0\" .Capabilities.KubeVersion.GitVersion }}" - - # Add helm annotations to all CRDS and skip the first 4 lines of crds.yaml - "$YQ_BIN_PATH" w -d'*' "$CEPH_CRDS_FILE_PATH" "metadata.annotations[helm.sh/resource-policy]" keep | tail -n +5 - - # add else - echo "{{- else }}" - - # add footer - cat "${SCRIPT_ROOT}/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml" - # DO NOT REMOVE the empty line, it is necessary - echo "" - echo "{{- end }}" - echo "{{- end }}" - } >>"$CEPH_HELM_CRDS_FILE_PATH" } ######## # MAIN # ######## generating_crds_v1 - -if [ -z "$NO_OB_OBC_VOL_GEN" ]; then - echo "Generating v1alpha2 in crds.yaml" - copy_ob_obc_crds - generating_crds_v1alpha2 -fi - -generate_vol_rep_crds - -generating_main_crd - -for crd in "$OLM_CATALOG_DIR/"*.yaml; do - echo "---" >> "$CEPH_CRDS_FILE_PATH" # yq doesn't output doc separators - # Process each intermediate CRD file with yq to enforce consistent formatting in the final product - # regardless of whether yq was used in previous steps to alter CRD intermediate files. - $YQ_BIN_PATH read "$crd" >> "$CEPH_CRDS_FILE_PATH" -done - -build_helm_resources diff --git a/build/crds/crds.go b/build/crds/crds.go index 1bcaee88b..01bee7092 100644 --- a/build/crds/crds.go +++ b/build/crds/crds.go @@ -1,3 +1,4 @@ +//go:build crds // +build crds /* diff --git a/build/makelib/common.mk b/build/makelib/common.mk index 045794c63..24a5b1210 100644 --- a/build/makelib/common.mk +++ b/build/makelib/common.mk @@ -110,7 +110,7 @@ export SED_IN_PLACE echo.%: ; @echo $* = $($*) # Select which images (backends) to make; default to all possible images -IMAGES ?= ceph nfs cassandra +IMAGES ?= cassandra COMMA := , SPACE := diff --git a/build/makelib/golang.mk b/build/makelib/golang.mk index 38296d70c..6f9b12f53 100644 --- a/build/makelib/golang.mk +++ b/build/makelib/golang.mk @@ -48,7 +48,7 @@ GO_TEST_FLAGS ?= # ==================================================================================== # Setup go environment -GO_SUPPORTED_VERSIONS ?= 1.16 +GO_SUPPORTED_VERSIONS ?= 1.16|1.17 GO_PACKAGES := $(foreach t,$(GO_SUBDIRS),$(GO_PROJECT)/$(t)/...) 
GO_INTEGRATION_TEST_PACKAGES := $(foreach t,$(GO_INTEGRATION_TESTS_SUBDIRS),$(GO_PROJECT)/$(t)/integration) @@ -155,8 +155,11 @@ go.vet: @CGO_ENABLED=0 $(GOHOST) vet $(GO_COMMON_FLAGS) $(GO_PACKAGES) $(GO_INTEGRATION_TEST_PACKAGES) .PHONY: go.fmt +# ignore deepcopy generated files since the tool hardcoded the header with a "// +build" which in Golang 1.17 makes it fail gofmt since "////go:build" is preferred +# see: https://github.com/kubernetes/gengo/blob/master/examples/deepcopy-gen/generators/deepcopy.go#L136 +# https://github.com/kubernetes/gengo/pull/210 go.fmt: $(GOFMT) - @gofmt_out=$$($(GOFMT) -s -d -e $(GO_SUBDIRS) $(GO_INTEGRATION_TESTS_SUBDIRS) 2>&1) && [ -z "$${gofmt_out}" ] || (echo "$${gofmt_out}" 1>&2; exit 1) + @gofmt_out=$$(find $(GO_SUBDIRS) -name "*.go" -not -name "*.deepcopy.go" | xargs $(GOFMT) -s -d -e 2>&1) && [ -z "$${gofmt_out}" ] || (echo "$${gofmt_out}" 1>&2; exit 1) go.validate: go.vet go.fmt diff --git a/build/makelib/helm.mk b/build/makelib/helm.mk deleted file mode 100644 index 0f4d55ee0..000000000 --- a/build/makelib/helm.mk +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2017 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# the helm charts to build -HELM_CHARTS ?= rook-ceph rook-ceph-cluster -HELM_BASE_URL ?= https://charts.rook.io -HELM_S3_BUCKET ?= rook.chart -HELM_CHARTS_DIR ?= $(ROOT_DIR)/cluster/charts -HELM_OUTPUT_DIR ?= $(OUTPUT_DIR)/charts - -HELM_HOME := $(abspath $(CACHE_DIR)/helm) -HELM_VERSION := v3.6.2 -HELM := $(TOOLS_HOST_DIR)/helm-$(HELM_VERSION) -HELM_INDEX := $(HELM_OUTPUT_DIR)/index.yaml -export HELM_HOME - -$(HELM_OUTPUT_DIR): - @mkdir -p $@ - -$(HELM): - @echo === installing helm - @mkdir -p $(TOOLS_HOST_DIR)/tmp - @curl -sL https://get.helm.sh/helm-$(HELM_VERSION)-$(GOHOSTOS)-$(GOHOSTARCH).tar.gz | tar -xz -C $(TOOLS_HOST_DIR)/tmp - @mv $(TOOLS_HOST_DIR)/tmp/$(GOHOSTOS)-$(GOHOSTARCH)/helm $(HELM) - @rm -fr $(TOOLS_HOST_DIR)/tmp - -define helm.chart -$(HELM_OUTPUT_DIR)/$(1)-$(VERSION).tgz: $(HELM) $(HELM_OUTPUT_DIR) $(shell find $(HELM_CHARTS_DIR)/$(1) -type f) - @echo === helm package $(1) - @cp -r $(HELM_CHARTS_DIR)/$(1) $(OUTPUT_DIR) - @$(SED_IN_PLACE) 's|VERSION|$(VERSION)|g' $(OUTPUT_DIR)/$(1)/values.yaml - @$(HELM) lint $(abspath $(OUTPUT_DIR)/$(1)) --set image.tag=$(VERSION) - @$(HELM) package --version $(VERSION) -d $(HELM_OUTPUT_DIR) $(abspath $(OUTPUT_DIR)/$(1)) -$(HELM_INDEX): $(HELM_OUTPUT_DIR)/$(1)-$(VERSION).tgz -endef -$(foreach p,$(HELM_CHARTS),$(eval $(call helm.chart,$(p)))) - -$(HELM_INDEX): $(HELM) $(HELM_OUTPUT_DIR) - @echo === helm index - @$(HELM) repo index $(HELM_OUTPUT_DIR) - -helm.build: $(HELM_INDEX) diff --git a/build/release/Makefile b/build/release/Makefile index ddd892eac..7f02e244a 100644 --- a/build/release/Makefile +++ b/build/release/Makefile @@ -15,7 +15,6 @@ all: build include ../makelib/common.mk -include ../makelib/helm.mk # ==================================================================================== # Options @@ -34,7 +33,7 @@ endif DOCS_VERSION := 
$(shell echo $(BRANCH_NAME) | sed -E "s/^release\-([0-9]+)\.([0-9]+)$$/v\1.\2/g") DOCS_DIR ?= $(ROOT_DIR)/Documentation DOCS_WORK_DIR := $(WORK_DIR)/rook.github.io -DOCS_VERSION_DIR := $(DOCS_WORK_DIR)/docs/rook/$(DOCS_VERSION) +DOCS_VERSION_DIR := $(DOCS_WORK_DIR)/docs/cassandra/$(DOCS_VERSION) ifdef GIT_API_TOKEN DOCS_GIT_REPO := https://$(GIT_API_TOKEN)@github.com/rook/rook.github.io.git @@ -55,7 +54,7 @@ REMOTE_NAME ?= origin PLATFORMS ?= $(ALL_PLATFORMS) ifneq ($(filter master release-%,$(BRANCH_NAME)),) -FLAVORS ?= output images docs helm +FLAVORS ?= output images docs else FLAVORS ?= output override BRANCH_NAME := pr/$(BRANCH_NAME) @@ -130,20 +129,6 @@ build.docs: publish.docs: cd $(DOCS_WORK_DIR) && DOCS_VERSION=$(DOCS_VERSION) $(MAKE) publish -# ==================================================================================== -# helm - -HELM_TEMP := $(shell mktemp -d) -HELM_URL := $(HELM_BASE_URL)/$(CHANNEL) - -promote.helm: $(HELM) -# copy existing charts to a temp dir, then combine with new charts, reindex, and upload - @$(S3_SYNC) s3://$(HELM_S3_BUCKET)/$(CHANNEL) $(HELM_TEMP) - @$(S3_SYNC) s3://$(S3_BUCKET)/build/$(BRANCH_NAME)/$(VERSION)/charts $(HELM_TEMP) - @$(HELM) repo index --url $(HELM_URL) $(HELM_TEMP) - @$(S3_SYNC_DEL) $(HELM_TEMP) s3://$(HELM_S3_BUCKET)/$(CHANNEL) - @rm -fr $(HELM_TEMP) - # ==================================================================================== # output diff --git a/build/release/README.md b/build/release/README.md index 16427b0d3..481a2caea 100644 --- a/build/release/README.md +++ b/build/release/README.md @@ -65,7 +65,7 @@ The Jenkins `release/tag` takes as input the version number to be released and t The job will will automatically tag the release and create the release branch. Once a new release branch is created or update, jenkins should perform the final release build as part of the `rook/rook` pipeline as usual. -The release branch is not by default created as "protected", so remember to go to the [branch settings](https://github.com/rook/rook/settings/branches) and mark it as "protected". +The release branch is not by default created as "protected", so remember to go to the [branch settings](https://github.com/rook/cassandra/settings/branches) and mark it as "protected". The protection settings should be similar to that of the previous release branches. ## Authoring release notes @@ -73,7 +73,7 @@ The protection settings should be similar to that of the previous release branch Every official release should have comprehensive and well written release notes published. While work is ongoing for a milestone, contributors should be keeping the [pending release notes](/PendingReleaseNotes.md) up to date, so that should be used as a starting point. -When the release is nearing completion, start a new release "draft" by going to https://github.com/rook/rook/releases/new and start with the content from the pending release notes. +When the release is nearing completion, start a new release "draft" by going to https://github.com/rook/cassandra/releases/new and start with the content from the pending release notes. Fill in the rest of the sections to fully capture the themes, accomplishments and caveats for the release. Ensure that you only click `Save draft` until the release is complete, after which you can then click `Publish release` to make them public. diff --git a/build/run b/build/run index 081fa5a60..7f3b1160c 100755 --- a/build/run +++ b/build/run @@ -19,11 +19,6 @@ source "${scriptdir}/common.sh" check_git -if [ ! 
-z $DOCKER_HOST ]; then - echo ERROR: we only support the case where docker is running locally for now. - exit 1 -fi - # build the the container if we've not done so already if [ "`uname -m`" != "x86_64" ]; then echo ERROR: we only support the cross container build on amd64 host. @@ -44,15 +39,12 @@ EOF USER_ARGS="-e BUILDER_UID=$( id -u ) -e BUILDER_GID=$( id -g )" BUILDER_HOME=/home/rook +KUBE_ARGS="" # setup kubectl from the host if available if [ -d ${HOME}/.kube ]; then KUBE_ARGS="-v ${HOME}/.kube:${BUILDER_HOME}/.kube" fi -if [ -x ${KUBEADM_DIND_DIR}/kubectl ]; then -KUBEADM_DIND_ARGS="-v ${KUBEADM_DIND_DIR}/kubectl:/usr/bin/kubectl" -fi - if [ "`uname -s`" != "Linux" ]; then # On non-linux hosts, its is EXTREMELY slow to mount the source @@ -79,7 +71,7 @@ if [ "`uname -s`" != "Linux" ]; then # /.aws (bind mounted to host ${HOME}/.aws) # /.docker (bind mounted to host ${HOME}/.docker) # /.cache (bind mounted to host ${CACHE_DIR} if set) - # /go/src/github.com/rook/rook (rsync'd from host ) + # /go/src/github.com/rook/cassandra (rsync'd from host ) # now copy the source tree to the container volume. Note this also # copies the .git directory but not the index.lock files which @@ -90,7 +82,7 @@ if [ "`uname -s`" != "Linux" ]; then --filter="- /.vscode/" \ --filter="- index.lock" - MOUNT_OPTS="${MOUNT_OPTS} -v ${CROSS_IMAGE_VOLUME}:${BUILDER_HOME}" + MOUNT_OPTS="-v ${CROSS_IMAGE_VOLUME}:${BUILDER_HOME}" else @@ -104,9 +96,9 @@ else # /.docker (bind mounted to host ${HOME}/.docker) # /go # /pkg (bind mounted to /.work/cross_pkg) - # /src/github.com/rook/rook (bind mounted to ) + # /src/github.com/rook/cassandra (bind mounted to ) - MOUNT_OPTS="${MOUNT_OPTS} \ + MOUNT_OPTS="\ -v ${scriptdir}/../.work/cross_pkg:${BUILDER_HOME}/go/pkg -v ${scriptdir}/..:${BUILDER_HOME}/go/src/${BUILD_REPO}" fi @@ -118,16 +110,10 @@ if [[ -n "${CACHE_DIR}" ]]; then -v ${CACHE_DIR}:${BUILDER_HOME}/go/src/${BUILD_REPO}/.cache" fi -# we copy credential files for github access -[[ -f ${HOME}/.netrc ]] && NETRC_ARGS="-v ${HOME}/.netrc:${BUILDER_HOME}/.netrc" - tty -s && TTY_ARGS=-ti || TTY_ARGS= -# docker seems more reliable when running with net=host. -if [ -z "${DISABLE_HOST_NETWORK}" ]; then - NET_ARGS="--net=host" -fi - +DISABLE_NESTED_DOCKER="" +DOCKER_NESTED_ARGS="" # BUGBUG: new docker clients switch to using OSX keychain. how do we copy creds? # sometimes we run docker inside docker. bind the docker config and socket if [ -z "${DISABLE_NESTED_DOCKER}" ]; then @@ -165,13 +151,10 @@ ${DOCKERCMD} run \ -v ${PWD}/_output:${BUILDER_HOME}/go/bin \ ${TTY_ARGS} \ ${KUBE_ARGS} \ - ${KUBEADM_DIND_ARGS} \ ${DOWNLOAD_ARGS} \ - ${NETRC_ARGS} \ ${AWS_ARGS} \ ${USER_ARGS} \ ${DOCKER_NESTED_ARGS} \ - ${NET_ARGS} \ ${MOUNT_OPTS} \ -w ${BUILDER_HOME}/go/src/${BUILD_REPO} \ ${CROSS_IMAGE} \ diff --git a/cluster/charts/rook-ceph-cluster/.helmignore b/cluster/charts/rook-ceph-cluster/.helmignore deleted file mode 100644 index 5ce4e2e57..000000000 --- a/cluster/charts/rook-ceph-cluster/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -*.tmpl diff --git a/cluster/charts/rook-ceph-cluster/Chart.yaml b/cluster/charts/rook-ceph-cluster/Chart.yaml deleted file mode 100644 index cc2b48f9b..000000000 --- a/cluster/charts/rook-ceph-cluster/Chart.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v2 -description: Manages a single Ceph cluster namespace for Rook -name: rook-ceph-cluster -version: 0.0.1 -icon: https://rook.io/images/rook-logo.svg -sources: - - https://github.com/rook/rook diff --git a/cluster/charts/rook-ceph-cluster/README.md b/cluster/charts/rook-ceph-cluster/README.md deleted file mode 100644 index 8091753b8..000000000 --- a/cluster/charts/rook-ceph-cluster/README.md +++ /dev/null @@ -1 +0,0 @@ -See the [Helm Ceph Cluster](/Documentation/helm-ceph-cluster.md) documentation. diff --git a/cluster/charts/rook-ceph-cluster/templates/NOTES.txt b/cluster/charts/rook-ceph-cluster/templates/NOTES.txt deleted file mode 100644 index 83659c9a2..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/NOTES.txt +++ /dev/null @@ -1,8 +0,0 @@ -The Ceph Cluster has been installed. Check its status by running: - kubectl --namespace {{ .Release.Namespace }} get cephcluster - -Visit https://rook.github.io/docs/rook/master/ceph-cluster-crd.html for more information about the Ceph CRD. - -Important Notes: -- You can only deploy a single cluster per namespace -- If you wish to delete this cluster and start fresh, you will also have to wipe the OSD disks using `sfdisk` diff --git a/cluster/charts/rook-ceph-cluster/templates/_helpers.tpl b/cluster/charts/rook-ceph-cluster/templates/_helpers.tpl deleted file mode 100644 index 8a7cf525d..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/_helpers.tpl +++ /dev/null @@ -1,33 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Define imagePullSecrets option to pass to all service accounts -*/}} -{{- define "imagePullSecrets" }} -{{- if .Values.imagePullSecrets -}} -imagePullSecrets: -{{ toYaml .Values.imagePullSecrets }} -{{- end -}} -{{- end -}} - -{{/* -Define the clusterName as defaulting to the release namespace -*/}} -{{- define "clusterName" -}} -{{ .Values.clusterName | default .Release.Namespace }} -{{- end -}} diff --git a/cluster/charts/rook-ceph-cluster/templates/cephblockpool.yaml b/cluster/charts/rook-ceph-cluster/templates/cephblockpool.yaml deleted file mode 100644 index 41856f5a5..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/cephblockpool.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- $root := . 
-}} -{{- range $blockpool := .Values.cephBlockPools -}} ---- -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: {{ $blockpool.name }} -spec: -{{ toYaml $blockpool.spec | indent 2 }} ---- -{{- if default false $blockpool.storageClass.enabled }} -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ $blockpool.storageClass.name }} - annotations: - storageclass.kubernetes.io/is-default-class: "{{ if default false $blockpool.storageClass.isDefault }}true{{ else }}false{{ end }}" -provisioner: {{ $root.Values.operatorNamespace }}.rbd.csi.ceph.com -parameters: - pool: {{ $blockpool.name }} - clusterID: {{ $root.Release.Namespace }} -{{ toYaml $blockpool.storageClass.parameters | indent 2 }} -reclaimPolicy: {{ default "Delete" $blockpool.storageClass.reclaimPolicy }} -allowVolumeExpansion: {{ default "true" $blockpool.storageClass.allowVolumeExpansion }} -{{ end }} -{{ end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/cephcluster.yaml b/cluster/charts/rook-ceph-cluster/templates/cephcluster.yaml deleted file mode 100644 index 75d60039c..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/cephcluster.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: {{ default .Release.Namespace .Values.clusterName }} -spec: - monitoring: - rulesNamespace: {{ default .Release.Namespace .Values.monitoring.rulesNamespaceOverride }} -{{ toYaml .Values.monitoring | indent 4 }} - -{{ toYaml .Values.cephClusterSpec | indent 2 }} diff --git a/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml b/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml deleted file mode 100644 index 73be71f60..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- $root := . -}} -{{- range $filesystem := .Values.cephFileSystems -}} ---- -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: {{ $filesystem.name }} -spec: -{{ toYaml $filesystem.spec | indent 2 }} ---- -{{- if default false $filesystem.storageClass.enabled }} -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ $filesystem.storageClass.name }} -provisioner: {{ $root.Values.operatorNamespace }}.cephfs.csi.ceph.com -parameters: - fsName: {{ $filesystem.name }} - pool: {{ $filesystem.name }}-data0 - clusterID: {{ $root.Release.Namespace }} -{{ toYaml $filesystem.storageClass.parameters | indent 2 }} -reclaimPolicy: {{ default "Delete" $filesystem.storageClass.reclaimPolicy }} -{{ end }} -{{ end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml b/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml deleted file mode 100644 index 21177f32b..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- $root := . 
-}} -{{- range $objectstore := .Values.cephObjectStores -}} ---- -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: {{ $objectstore.name }} -spec: -{{ toYaml $objectstore.spec | indent 2 }} ---- -{{- if default false $objectstore.storageClass.enabled }} -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ $objectstore.storageClass.name }} -provisioner: {{ $root.Release.Namespace }}.ceph.rook.io/bucket -reclaimPolicy: {{ default "Delete" $objectstore.storageClass.reclaimPolicy }} -parameters: - objectStoreName: {{ $objectstore.name }} - objectStoreNamespace: {{ $root.Release.Namespace }} -{{ toYaml $objectstore.storageClass.parameters | indent 2 }} -{{ end }} -{{ end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/clusterrolebinding.yaml b/cluster/charts/rook-ceph-cluster/templates/clusterrolebinding.yaml deleted file mode 100644 index 27943b33b..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if ne .Release.Namespace .Values.operatorNamespace }} ---- -# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-cluster-{{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-cluster -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} ---- -# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd-{{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/configmap.yaml b/cluster/charts/rook-ceph-cluster/templates/configmap.yaml deleted file mode 100644 index 3586ed856..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/configmap.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- if .Values.configOverride }} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: rook-config-override -data: - config: | -{{ .Values.configOverride | nindent 4 }} -{{- end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/deployment.yaml b/cluster/charts/rook-ceph-cluster/templates/deployment.yaml deleted file mode 100644 index 7e99948b2..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/deployment.yaml +++ /dev/null @@ -1,67 +0,0 @@ -{{- if .Values.toolbox.enabled }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-ceph-tools - labels: - app: rook-ceph-tools -spec: - replicas: 1 - selector: - matchLabels: - app: rook-ceph-tools - template: - metadata: - labels: - app: rook-ceph-tools - spec: - dnsPolicy: ClusterFirstWithHostNet -{{- $network := .Values.cephClusterSpec.network | default dict -}} -{{- if ($network.provider | default "") | eq "host" }} - hostNetwork: true -{{- end }} - containers: - - name: rook-ceph-tools - image: {{ .Values.toolbox.image }} - command: ["/tini"] - args: ["-g", "--", "/usr/local/bin/toolbox.sh"] - imagePullPolicy: IfNotPresent - env: - - name: ROOK_CEPH_USERNAME - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-username - - name: ROOK_CEPH_SECRET - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-secret - volumeMounts: 
- - mountPath: /etc/ceph - name: ceph-config - - name: mon-endpoint-volume - mountPath: /etc/rook - volumes: - - name: mon-endpoint-volume - configMap: - name: rook-ceph-mon-endpoints - items: - - key: data - path: mon-endpoints - - name: ceph-config - emptyDir: {} - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 5 -{{- if .Values.toolbox.tolerations }} -{{ toYaml .Values.toolbox.tolerations | indent 8 }} -{{- end }} -{{- if .Values.toolbox.affinity }} - affinity: -{{ toYaml .Values.toolbox.affinity | indent 8 }} -{{- end }} -{{- end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/ingress.yaml b/cluster/charts/rook-ceph-cluster/templates/ingress.yaml deleted file mode 100644 index efd6dd30e..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/ingress.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if .Values.ingress.dashboard.host }} ---- -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} -apiVersion: networking.k8s.io/v1beta1 -{{ else }} -apiVersion: extensions/v1beta1 -{{ end -}} -kind: Ingress -metadata: - name: {{ template "clusterName" . }}-dashboard - {{- if .Values.ingress.dashboard.annotations }} - annotations: {{- toYaml .Values.ingress.dashboard.annotations | nindent 4 }} - {{- end }} -spec: - rules: - - host: {{ .Values.ingress.dashboard.host.name }} - http: - paths: - - path: {{ .Values.ingress.dashboard.host.path }} - backend: - serviceName: rook-ceph-mgr-dashboard - servicePort: http-dashboard - {{- if .Values.ingress.dashboard.tls }} - tls: {{- toYaml .Values.ingress.dashboard.tls | nindent 4 }} - {{- end }} -{{- end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/role.yaml b/cluster/charts/rook-ceph-cluster/templates/role.yaml deleted file mode 100644 index 36719cab5..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/role.yaml +++ /dev/null @@ -1,107 +0,0 @@ -{{- if ne .Release.Namespace .Values.operatorNamespace }} ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: ["ceph.rook.io"] - resources: ["cephclusters", "cephclusters/finalizers"] - verbs: ["get", "list", "create", "update", "delete"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr -rules: - - apiGroups: - - "" - resources: - - pods - - services - - pods/log - verbs: - - get - - list - - watch - - create - - update - - delete - - apiGroups: - - batch - resources: - - jobs - verbs: - - get - - list - - watch - - create - - update - - delete - - apiGroups: - - ceph.rook.io - resources: - - "*" - verbs: - - "*" ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter -rules: - - apiGroups: - - "" - resources: - - pods - - configmaps - verbs: - - get - - list - - watch - - create - - update - - delete ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get"] - - apiGroups: ["apps"] - resources: ["deployments"] - verbs: ["get", "delete" ] - - apiGroups: ["batch"] - resources: ["jobs"] - verbs: ["get", "list", "delete" ] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["delete"] - -{{- if .Values.monitoring.enabled }} ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: 
rook-ceph-monitoring -rules: - - apiGroups: - - "monitoring.coreos.com" - resources: - - servicemonitors - - prometheusrules - verbs: - - get - - list - - watch - - create - - update - - delete -{{- end }} -{{- end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/rolebinding.yaml b/cluster/charts/rook-ceph-cluster/templates/rolebinding.yaml deleted file mode 100644 index 4d12640b2..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/rolebinding.yaml +++ /dev/null @@ -1,101 +0,0 @@ -{{- if ne .Release.Namespace .Values.operatorNamespace }} ---- -# Allow the operator to create resources in this cluster's namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cluster-mgmt -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-cluster-mgmt -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Values.operatorNamespace }} ---- -# Allow the osd pods in this namespace to work with configmaps -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-osd ---- -# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-mgr -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr ---- -# Allow the ceph mgr to access the rook system resources necessary for the mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-system-{{ .Release.Namespace }} - namespace: {{ .Values.operatorNamespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-system -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-cmd-reporter -subjects: - - kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: {{ .Release.Namespace }} ---- -# Allow the osd purge job to run in this namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-purge-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-purge-osd - namespace: {{ .Release.Namespace }} - -{{- if .Values.monitoring.enabled }} ---- -# Allow the operator to get ServiceMonitors in this cluster's namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-monitoring -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-monitoring -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Values.operatorNamespace }} -{{- end }} -{{- end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/serviceaccount.yaml b/cluster/charts/rook-ceph-cluster/templates/serviceaccount.yaml deleted file mode 100644 index 16b7e2ef9..000000000 --- a/cluster/charts/rook-ceph-cluster/templates/serviceaccount.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if ne .Release.Namespace .Values.operatorNamespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: 
rook-ceph-osd -{{ template "imagePullSecrets" . }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-mgr -{{ template "imagePullSecrets" . }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-cmd-reporter -{{ template "imagePullSecrets" . }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-purge-osd -{{ template "imagePullSecrets" . }} -{{- end }} diff --git a/cluster/charts/rook-ceph-cluster/values.yaml b/cluster/charts/rook-ceph-cluster/values.yaml deleted file mode 100644 index 7e284669c..000000000 --- a/cluster/charts/rook-ceph-cluster/values.yaml +++ /dev/null @@ -1,416 +0,0 @@ -# Default values for a single rook-ceph cluster -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Namespace of the main rook operator -operatorNamespace: rook-ceph - -# The metadata.name of the CephCluster CR. The default name is the same as the namespace. -# clusterName: rook-ceph - -# Ability to override ceph.conf -# configOverride: | -# [global] -# mon_allow_pool_delete = true -# osd_pool_default_size = 3 -# osd_pool_default_min_size = 2 - -# Installs a debugging toolbox deployment -toolbox: - enabled: false - image: rook/ceph:VERSION - tolerations: [] - affinity: {} - -monitoring: - # requires Prometheus to be pre-installed - # enabling will also create RBAC rules to allow Operator to create ServiceMonitors - enabled: false - rulesNamespaceOverride: - -# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts. -# imagePullSecrets: -# - name: my-registry-secret - -# All values below are taken from the CephCluster CRD -# More information can be found at [Ceph Cluster CRD](/Documentation/ceph-cluster-crd.md) -cephClusterSpec: - cephVersion: - # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). - # v14 is nautilus, v15 is octopus, and v16 is pacific. - # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different - # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. - # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v15.2.11-20200419 - # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v16.2.5 - # Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported. - # Future versions such as `pacific` would require this to be set to `true`. - # Do not set to true in production. - allowUnsupported: false - - # The path on the host where configuration files will be persisted. Must be specified. - # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. - # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 
- dataDirHostPath: /var/lib/rook - - # Whether or not upgrade should continue even if a check fails - # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise - # Use at your OWN risk - # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades - skipUpgradeChecks: false - - # Whether or not continue if PGs are not clean during an upgrade - continueUpgradeAfterChecksEvenIfNotHealthy: false - - # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart. - # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one - # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would - # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. - # The default wait timeout is 10 minutes. - waitTimeoutForHealthyOSDInMinutes: 10 - - mon: - # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. - count: 3 - # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. - # Mons should only be allowed on the same node for test environments where data loss is acceptable. - allowMultiplePerNode: false - - mgr: - # When higher availability of the mgr is needed, increase the count to 2. - # In that case, one mgr will be active and one in standby. When Ceph updates which - # mgr is active, Rook will update the mgr services to match the active mgr. - count: 1 - modules: - # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules - # are already enabled by other settings in the cluster CR. - - name: pg_autoscaler - enabled: true - - # enable the ceph dashboard for viewing cluster status - dashboard: - enabled: true - # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) - # urlPrefix: /ceph-dashboard - # serve the dashboard at the given port. - # port: 8443 - - # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/ceph-cluster-crd.md#network-configuration-settings - # network: - # # enable host networking - # provider: host - # # EXPERIMENTAL: enable the Multus network provider - # provider: multus - # selectors: - # # The selector keys are required to be `public` and `cluster`. - # # Based on the configuration, the operator will do the following: - # # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface - # # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' - # # - # # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus - # # - # # public: public-conf --> NetworkAttachmentDefinition object name in Multus - # # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus - # # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. 
Empty string would mean IPv4 - # ipFamily: "IPv6" - # # Ceph daemons to listen on both IPv4 and Ipv6 networks - # dualStack: false - - # enable the crash collector for ceph daemon crash collection - crashCollector: - disable: false - # Uncomment daysToRetain to prune ceph crash entries older than the - # specified number of days. - # daysToRetain: 30 - - # enable log collector, daemons will log on files and rotate - # logCollector: - # enabled: true - # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. - - # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. - cleanupPolicy: - # Since cluster cleanup is destructive to data, confirmation is required. - # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". - # This value should only be set when the cluster is about to be deleted. After the confirmation is set, - # Rook will immediately stop configuring the cluster and only wait for the delete command. - # If the empty string is set, Rook will not destroy any data on hosts during uninstall. - confirmation: "" - # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion - sanitizeDisks: - # method indicates if the entire disk should be sanitized or simply ceph's metadata - # in both case, re-install is possible - # possible choices are 'complete' or 'quick' (default) - method: quick - # dataSource indicate where to get random bytes from to write on the disk - # possible choices are 'zero' (default) or 'random' - # using random sources will consume entropy from the system and will take much more time then the zero source - dataSource: zero - # iteration overwrite N times instead of the default (1) - # takes an integer value - iteration: 1 - # allowUninstallWithVolumes defines how the uninstall should be performed - # If set to true, cephCluster deletion does not wait for the PVs to be deleted. - allowUninstallWithVolumes: false - - # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. - # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and - # tolerate taints with a key of 'storage-node'. - # placement: - # all: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - storage-node - # podAffinity: - # podAntiAffinity: - # topologySpreadConstraints: - # tolerations: - # - key: storage-node - # operator: Exists - # # The above placement information can also be specified for mon, osd, and mgr components - # mon: - # # Monitor deployments may contain an anti-affinity rule for avoiding monitor - # # collocation on the same node. This is a required rule when host network is used - # # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a - # # preferred rule with weight: 50. - # osd: - # mgr: - # cleanup: - - # annotations: - # all: - # mon: - # osd: - # cleanup: - # prepareosd: - # # If no mgr annotations are set, prometheus scrape annotations will be set by default. - # mgr: - - # labels: - # all: - # mon: - # osd: - # cleanup: - # mgr: - # prepareosd: - # # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. 
- # # These labels can be passed as LabelSelector to Prometheus - # monitoring: - - # resources: - # # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory - # mgr: - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # # The above example requests/limits can also be added to the other components - # mon: - # osd: - # prepareosd: - # mgr-sidecar: - # crashcollector: - # logcollector: - # cleanup: - - # The option to automatically remove OSDs that are out and are safe to destroy. - removeOSDsIfOutAndSafeToRemove: false - - # priority classes to apply to ceph resources - # priorityClassNames: - # all: rook-ceph-default-priority-class - # mon: rook-ceph-mon-priority-class - # osd: rook-ceph-osd-priority-class - # mgr: rook-ceph-mgr-priority-class - - storage: # cluster level storage configuration and selection - useAllNodes: true - useAllDevices: true - # deviceFilter: - # config: - # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map - # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. - # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB - # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller - # osdsPerDevice: "1" # this value can be overridden at the node or device level - # encryptedDevice: "true" # the default value for this option is "false" - # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named - # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. - # nodes: - # - name: "172.17.4.201" - # devices: # specific devices to use for storage can be specified for each node - # - name: "sdb" - # - name: "nvme01" # multiple osds can be created on high performance devices - # config: - # osdsPerDevice: "5" - # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths - # config: # configuration can be specified at the node level which overrides the cluster level config - # - name: "172.17.4.301" - # deviceFilter: "^sd." - - # The section for configuring management of daemon disruptions during upgrade or fencing. - disruptionManagement: - # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically - # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will - # block eviction of OSDs by default and unblock them safely when drains are detected. - managePodBudgets: true - # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the - # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. - osdMaintenanceTimeout: 30 - # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. - # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. - # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. 
- pgHealthCheckTimeout: 0 - # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. - # Only available on OpenShift. - manageMachineDisruptionBudgets: false - # Namespace in which to watch for the MachineDisruptionBudgets. - machineDisruptionBudgetNamespace: openshift-machine-api - - # Configure the healthcheck and liveness probes for ceph pods. - # Valid values for daemons are 'mon', 'osd', 'status' - healthCheck: - daemonHealth: - mon: - disabled: false - interval: 45s - osd: - disabled: false - interval: 60s - status: - disabled: false - interval: 60s - # Change pod liveness probe, it works for all mon, mgr, and osd pods. - livenessProbe: - mon: - disabled: false - mgr: - disabled: false - osd: - disabled: false - -ingress: - dashboard: {} - # annotations: - # kubernetes.io/ingress.class: nginx - # external-dns.alpha.kubernetes.io/hostname: example.com - # nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2 - # host: - # name: example.com - # path: "/ceph-dashboard(/|$)(.*)" - # tls: - -cephBlockPools: - - name: ceph-blockpool - # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration - spec: - failureDomain: host - replicated: - size: 3 - storageClass: - enabled: true - name: ceph-block - isDefault: true - reclaimPolicy: Delete - allowVolumeExpansion: true - # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration - parameters: - # (optional) mapOptions is a comma-separated list of map options. - # For krbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options - # For nbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options - # mapOptions: lock_on_read,queue_depth=1024 - - # (optional) unmapOptions is a comma-separated list of unmap options. - # For krbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options - # For nbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options - # unmapOptions: force - - # RBD image format. Defaults to "2". - imageFormat: "2" - # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. - imageFeatures: layering - # The secrets contain Ceph admin credentials. - csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph - # Specify the filesystem type of the volume. If not specified, csi-provisioner - # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock - # in hyperconverged settings where the volume is mounted on the same node as the osds. 
- csi.storage.k8s.io/fstype: ext4 - -cephFileSystems: - - name: ceph-filesystem - # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration - spec: - metadataPool: - replicated: - size: 3 - dataPools: - - failureDomain: host - replicated: - size: 3 - metadataServer: - activeCount: 1 - activeStandby: true - storageClass: - enabled: true - name: ceph-filesystem - reclaimPolicy: Delete - # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration - parameters: - # The secrets contain Ceph admin credentials. - csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph - csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph - # Specify the filesystem type of the volume. If not specified, csi-provisioner - # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock - # in hyperconverged settings where the volume is mounted on the same node as the osds. - csi.storage.k8s.io/fstype: ext4 - -cephObjectStores: - - name: ceph-objectstore - # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration - spec: - metadataPool: - failureDomain: host - replicated: - size: 3 - dataPool: - failureDomain: host - erasureCoded: - dataChunks: 2 - codingChunks: 1 - preservePoolsOnDelete: true - gateway: - port: 80 - # securePort: 443 - # sslCertificateRef: - instances: 1 - healthCheck: - bucket: - interval: 60s - storageClass: - enabled: true - name: ceph-bucket - reclaimPolicy: Delete - # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration - parameters: - # note: objectStoreNamespace and objectStoreName are configured by the chart - region: us-east-1 diff --git a/cluster/charts/rook-ceph/.helmignore b/cluster/charts/rook-ceph/.helmignore deleted file mode 100644 index 5ce4e2e57..000000000 --- a/cluster/charts/rook-ceph/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -*.tmpl diff --git a/cluster/charts/rook-ceph/Chart.yaml b/cluster/charts/rook-ceph/Chart.yaml deleted file mode 100644 index 715026d34..000000000 --- a/cluster/charts/rook-ceph/Chart.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v2 -description: File, Block, and Object Storage Services for your Cloud-Native Environment -name: rook-ceph -version: 0.0.1 -icon: https://rook.io/images/rook-logo.svg -sources: - - https://github.com/rook/rook diff --git a/cluster/charts/rook-ceph/README.md b/cluster/charts/rook-ceph/README.md deleted file mode 100644 index 272e6ab55..000000000 --- a/cluster/charts/rook-ceph/README.md +++ /dev/null @@ -1 +0,0 @@ -See the [Operator Helm Chart](/Documentation/helm-operator.md) documentation. 
diff --git a/cluster/charts/rook-ceph/templates/NOTES.txt b/cluster/charts/rook-ceph/templates/NOTES.txt deleted file mode 100644 index 92791bf62..000000000 --- a/cluster/charts/rook-ceph/templates/NOTES.txt +++ /dev/null @@ -1,11 +0,0 @@ -The Rook Operator has been installed. Check its status by running: - kubectl --namespace {{ .Release.Namespace }} get pods -l "app=rook-ceph-operator" - -Visit https://rook.io/docs/rook/master for instructions on how to create and configure Rook clusters - -Important Notes: -- You must customize the 'CephCluster' resource in the sample manifests for your cluster. -- Each CephCluster must be deployed to its own namespace, the samples use `rook-ceph` for the namespace. -- The sample manifests assume you also installed the rook-ceph operator in the `rook-ceph` namespace. -- The helm chart includes all the RBAC required to create a CephCluster CRD in the same namespace. -- Any disk devices you add to the cluster in the 'CephCluster' must be empty (no filesystem and no partitions). diff --git a/cluster/charts/rook-ceph/templates/_helpers.tpl b/cluster/charts/rook-ceph/templates/_helpers.tpl deleted file mode 100644 index 529b49017..000000000 --- a/cluster/charts/rook-ceph/templates/_helpers.tpl +++ /dev/null @@ -1,26 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Define imagePullSecrets option to pass to all service accounts -*/}} -{{- define "imagePullSecrets" }} -{{- if .Values.imagePullSecrets -}} -imagePullSecrets: -{{ toYaml .Values.imagePullSecrets }} -{{- end -}} -{{- end -}} diff --git a/cluster/charts/rook-ceph/templates/clusterrole.yaml b/cluster/charts/rook-ceph/templates/clusterrole.yaml deleted file mode 100644 index 2218ec33d..000000000 --- a/cluster/charts/rook-ceph/templates/clusterrole.yaml +++ /dev/null @@ -1,482 +0,0 @@ -{{- if .Values.rbacEnable }} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-system - labels: - operator: rook - storage-backend: ceph -rules: - # Most resources are represented by a string representation of their name, such as “pods”, just as it appears in the URL for the relevant API endpoint. - # However, some Kubernetes APIs involve a “subresource”, such as the logs for a pod. [...] - # To represent this in an RBAC role, use a slash to delimit the resource and subresource. 
- # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources - - apiGroups: [""] - resources: ["pods", "pods/log"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["pods/exec"] - verbs: ["create"] ---- -# The cluster role for managing all the cluster-specific resources in a namespace -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-ceph-cluster-mgmt - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - - apps - - extensions - resources: - - secrets - - pods - - pods/log - - services - - configmaps - - deployments - - daemonsets - verbs: - - get - - list - - watch - - patch - - create - - update - - delete ---- -# The cluster role for managing the Rook CRDs -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-ceph-global - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - resources: - # Pod access is needed for fencing - - pods - # Node access is needed for determining nodes where mons should run - - nodes - - nodes/proxy - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - # PVs and PVCs are managed by the Rook provisioner - - persistentvolumes - - persistentvolumeclaims - - endpoints - verbs: - - get - - list - - watch - - patch - - create - - update - - delete -- apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch -- apiGroups: - - batch - resources: - - jobs - - cronjobs - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - ceph.rook.io - resources: - - "*" - verbs: - - "*" -- apiGroups: - - rook.io - resources: - - "*" - verbs: - - "*" -- apiGroups: - - policy - - apps - - extensions - resources: - # This is for the clusterdisruption controller - - poddisruptionbudgets - # This is for both clusterdisruption and nodedrain controllers - - deployments - - replicasets - verbs: - - "*" -- apiGroups: - - healthchecking.openshift.io - resources: - - machinedisruptionbudgets - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - machine.openshift.io - resources: - - machines - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - storage.k8s.io - resources: - - csidrivers - verbs: - - create - - delete - - get - - update -- apiGroups: - - k8s.cni.cncf.io - resources: - - network-attachment-definitions - verbs: - - get ---- -# Aspects of ceph-mgr that require cluster-wide access -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-cluster - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - resources: - - configmaps - - nodes - - nodes/proxy - - persistentvolumes - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list - - get - - watch -- apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch ---- -# Aspects of ceph-mgr that require access to the system namespace -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-system -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-object-bucket - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - verbs: - - "*" - resources: - - 
secrets - - configmaps -- apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch -- apiGroups: - - "objectbucket.io" - verbs: - - "*" - resources: - - "*" ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list -# Use a default dict to avoid 'can't give argument to non-function' errors from text/template -{{- if ne ((.Values.agent | default (dict "mountSecurityMode" "")).mountSecurityMode | default "") "Any" }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-ceph-agent-mount - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - get -{{- end }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-nodeplugin -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "update"] - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-external-provisioner-runner -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments/status"] - verbs: ["patch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents/status"] - verbs: ["update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", "list", "watch", "delete", "get", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots/status"] - verbs: ["update"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-nodeplugin -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "update"] - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", 
"update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["serviceaccounts"] - verbs: ["get"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-external-provisioner-runner -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments/status"] - verbs: ["patch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents/status"] - verbs: ["update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", "list", "watch", "delete", "get", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get"] - - apiGroups: ["replication.storage.openshift.io"] - resources: ["volumereplications", "volumereplicationclasses"] - verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] - - apiGroups: ["replication.storage.openshift.io"] - resources: ["volumereplications/finalizers"] - verbs: ["update"] - - apiGroups: ["replication.storage.openshift.io"] - resources: ["volumereplications/status"] - verbs: ["get", "patch", "update"] - - apiGroups: ["replication.storage.openshift.io"] - resources: ["volumereplicationclasses/status"] - verbs: ["get"] - - apiGroups: [""] - resources: ["serviceaccounts"] - verbs: ["get"] -{{- end }} -{{- if .Values.pspEnable }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: 'psp:rook' - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -rules: -- apiGroups: - - policy - resources: - - podsecuritypolicies - resourceNames: - - 00-rook-ceph-operator - verbs: - - use -{{- end }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-admission-controller-role -rules: - - apiGroups: ["ceph.rook.io"] - resources: ["*"] - verbs: ["get", "watch", "list"] diff --git a/cluster/charts/rook-ceph/templates/clusterrolebinding.yaml b/cluster/charts/rook-ceph/templates/clusterrolebinding.yaml deleted file mode 100644 index 36d539a25..000000000 --- 
a/cluster/charts/rook-ceph/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,291 +0,0 @@ -{{- if .Values.rbacEnable }} -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-system - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-system -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Release.Namespace }} ---- -# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-global - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-global -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Release.Namespace }} ---- -# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-cluster -subjects: -- kind: ServiceAccount - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} ---- -# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-osd -subjects: -- kind: ServiceAccount - name: rook-ceph-osd - namespace: {{ .Release.Namespace }} ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-object-bucket -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-object-bucket -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Release.Namespace }} ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-nodeplugin -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-plugin-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: cephfs-csi-nodeplugin - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-provisioner-role -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: cephfs-external-provisioner-runner - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-provisioner-role -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: rbd-external-provisioner-runner - apiGroup: rbac.authorization.k8s.io - -{{- if .Values.pspEnable }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-ceph-system-psp - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Release.Namespace }} ---- 
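Each binding above attaches a service account in the release namespace to one of the ClusterRoles defined earlier. As an illustration only, the rook-ceph-global binding would render roughly as follows when the chart (version 0.0.1 per the Chart.yaml above) is released into the sample rook-ceph namespace:

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rook-ceph-global
  labels:
    operator: rook
    storage-backend: ceph
    chart: "rook-ceph-0.0.1"       # .Chart.Name-.Chart.Version
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-global
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph             # .Release.Namespace at install time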
-apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-default-psp - namespace: {{ .Release.Namespace }} - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: default - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-ceph-system-psp-users - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-system-psp-user -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-cephfs-provisioner-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-cephfs-plugin-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-plugin-sa - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-rbd-plugin-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-plugin-sa - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-rbd-provisioner-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: {{ .Release.Namespace }} ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-nodeplugin -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-plugin-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: rbd-csi-nodeplugin - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-osd-psp - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-mgr-psp - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-admission-controller-rolebinding -subjects: - - kind: ServiceAccount - name: rook-ceph-admission-controller - apiGroup: "" - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: rook-ceph-admission-controller-role - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding 
-metadata: - name: rook-ceph-cmd-reporter-psp - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: {{ .Release.Namespace }} -{{- end }} -{{- end }} diff --git a/cluster/charts/rook-ceph/templates/deployment.yaml b/cluster/charts/rook-ceph/templates/deployment.yaml deleted file mode 100644 index c6402c129..000000000 --- a/cluster/charts/rook-ceph/templates/deployment.yaml +++ /dev/null @@ -1,352 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-ceph-operator - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -spec: - replicas: 1 - selector: - matchLabels: - app: rook-ceph-operator - template: - metadata: - labels: - app: rook-ceph-operator - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{- if .Values.annotations }} - annotations: -{{ toYaml .Values.annotations | indent 8 }} -{{- end }} - spec: - containers: - - name: rook-ceph-operator - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: ["ceph", "operator"] - volumeMounts: - - mountPath: /var/lib/rook - name: rook-config - - mountPath: /etc/ceph - name: default-config-dir - env: - - name: ROOK_CURRENT_NAMESPACE_ONLY - value: {{ .Values.currentNamespaceOnly | quote }} -{{- if .Values.agent }} -{{- if .Values.agent.toleration }} - - name: AGENT_TOLERATION - value: {{ .Values.agent.toleration }} -{{- end }} -{{- if .Values.agent.tolerationKey }} - - name: AGENT_TOLERATION_KEY - value: {{ .Values.agent.tolerationKey }} -{{- end }} -{{- if .Values.agent.tolerations }} - - name: AGENT_TOLERATIONS - value: {{ toYaml .Values.agent.tolerations | quote }} -{{- end }} -{{- if .Values.agent.nodeAffinity }} - - name: AGENT_NODE_AFFINITY - value: {{ .Values.agent.nodeAffinity }} -{{- end }} -{{- if .Values.agent.priorityClassName }} - - name: AGENT_PRIORITY_CLASS_NAME - value: {{ .Values.agent.priorityClassName }} -{{- end }} -{{- if .Values.agent.mountSecurityMode }} - - name: AGENT_MOUNT_SECURITY_MODE - value: {{ .Values.agent.mountSecurityMode }} -{{- end }} -{{- if .Values.agent.flexVolumeDirPath }} - - name: FLEXVOLUME_DIR_PATH - value: {{ .Values.agent.flexVolumeDirPath }} -{{- end }} -{{- if .Values.agent.libModulesDirPath }} - - name: LIB_MODULES_DIR_PATH - value: {{ .Values.agent.libModulesDirPath }} -{{- end }} -{{- if .Values.agent.mounts }} - - name: AGENT_MOUNTS - value: {{ .Values.agent.mounts }} -{{- end }} -{{- end }} -{{- if .Values.discover }} -{{- if .Values.discover.toleration }} - - name: DISCOVER_TOLERATION - value: {{ .Values.discover.toleration }} -{{- end }} -{{- if .Values.discover.tolerationKey }} - - name: DISCOVER_TOLERATION_KEY - value: {{ .Values.discover.tolerationKey }} -{{- end }} -{{- if .Values.discover.tolerations }} - - name: DISCOVER_TOLERATIONS - value: {{ toYaml .Values.discover.tolerations | quote }} -{{- end }} -{{- if .Values.discover.priorityClassName }} - - name: DISCOVER_PRIORITY_CLASS_NAME - value: {{ .Values.discover.priorityClassName }} -{{- end }} -{{- if .Values.discover.nodeAffinity }} - - name: DISCOVER_AGENT_NODE_AFFINITY - value: {{ .Values.discover.nodeAffinity }} -{{- end }} -{{- if .Values.discover.podLabels }} - - name: DISCOVER_AGENT_POD_LABELS - value: {{ .Values.discover.podLabels }} -{{- end }} -{{- end }} - - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED - value: "{{ 
.Values.hostpathRequiresPrivileged }}" - - name: ROOK_LOG_LEVEL - value: {{ .Values.logLevel }} - - name: ROOK_ENABLE_SELINUX_RELABELING - value: "{{ .Values.enableSelinuxRelabeling }}" - - name: ROOK_DISABLE_DEVICE_HOTPLUG - value: "{{ .Values.disableDeviceHotplug }}" -{{- if .Values.csi }} - - name: ROOK_CSI_ENABLE_RBD - value: {{ .Values.csi.enableRbdDriver | quote }} - - name: ROOK_CSI_ENABLE_CEPHFS - value: {{ .Values.csi.enableCephfsDriver | quote }} - - name: CSI_ENABLE_CEPHFS_SNAPSHOTTER - value: {{ .Values.csi.enableCephfsSnapshotter | quote }} - - name: CSI_ENABLE_RBD_SNAPSHOTTER - value: {{ .Values.csi.enableRBDSnapshotter | quote }} - - name: CSI_PLUGIN_PRIORITY_CLASSNAME - value: {{ .Values.csi.pluginPriorityClassName | quote }} - - name: CSI_PROVISIONER_PRIORITY_CLASSNAME - value: {{ .Values.csi.provisionerPriorityClassName | quote }} - - name: CSI_ENABLE_OMAP_GENERATOR - value: {{ .Values.csi.enableOMAPGenerator | quote }} - - name: CSI_ENABLE_VOLUME_REPLICATION - value: {{ .Values.csi.volumeReplication.enabled | quote }} -{{- if .Values.csi.enableCSIHostNetwork }} - - name: CSI_ENABLE_HOST_NETWORK - value: {{ .Values.csi.enableCSIHostNetwork | quote }} -{{- end }} -{{- if .Values.csi.cephFSPluginUpdateStrategy }} - - name: CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY - value: {{ .Values.csi.cephFSPluginUpdateStrategy | quote }} -{{- end }} -{{- if .Values.csi.rbdFSGroupPolicy }} - - name: CSI_RBD_FSGROUPPOLICY - value: {{ .Values.csi.rbdFSGroupPolicy | quote }} -{{- end }} -{{- if .Values.csi.cephFSFSGroupPolicy }} - - name: CSI_CEPHFS_FSGROUPPOLICY - value: {{ .Values.csi.cephFSFSGroupPolicy | quote }} -{{- end }} -{{- if .Values.csi.rbdPluginUpdateStrategy }} - - name: CSI_RBD_PLUGIN_UPDATE_STRATEGY - value: {{ .Values.csi.rbdPluginUpdateStrategy | quote }} -{{- end }} -{{- if .Values.csi.kubeletDirPath }} - - name: ROOK_CSI_KUBELET_DIR_PATH - value: {{ .Values.csi.kubeletDirPath | quote }} -{{- end }} - - name: ROOK_CSI_ENABLE_GRPC_METRICS - value: {{ .Values.csi.enableGrpcMetrics | quote }} -{{- if .Values.csi.cephcsi }} -{{- if .Values.csi.cephcsi.image }} - - name: ROOK_CSI_CEPH_IMAGE - value: {{ .Values.csi.cephcsi.image | quote }} -{{- end }} -{{- end }} -{{- if .Values.csi.registrar }} -{{- if .Values.csi.registrar.image }} - - name: ROOK_CSI_REGISTRAR_IMAGE - value: {{ .Values.csi.registrar.image | quote }} -{{- end }} -{{- end }} -{{- if .Values.csi.provisioner }} -{{- if .Values.csi.provisioner.image }} - - name: ROOK_CSI_PROVISIONER_IMAGE - value: {{ .Values.csi.provisioner.image | quote }} -{{- end }} -{{- end }} -{{- if .Values.csi.snapshotter }} -{{- if .Values.csi.snapshotter.image }} - - name: ROOK_CSI_SNAPSHOTTER_IMAGE - value: {{ .Values.csi.snapshotter.image | quote }} -{{- end }} -{{- end }} -{{- if .Values.csi.attacher }} -{{- if .Values.csi.attacher.image }} - - name: ROOK_CSI_ATTACHER_IMAGE - value: {{ .Values.csi.attacher.image | quote }} -{{- end }} -{{- end }} -{{- if .Values.csi.resizer }} -{{- if .Values.csi.resizer.image }} - - name: ROOK_CSI_RESIZER_IMAGE - value: {{ .Values.csi.resizer.image | quote }} -{{- end }} -{{- end }} -{{- if .Values.csi.volumeReplication }} -{{- if .Values.csi.volumeReplication.image }} - - name: CSI_VOLUME_REPLICATION_IMAGE - value: {{ .Values.csi.volumeReplication.image | quote }} -{{- end }} -{{- end }} -{{- if .Values.csi.cephfsPodLabels }} - - name: ROOK_CSI_CEPHFS_POD_LABELS - value: {{ .Values.csi.cephfsPodLabels | quote }} -{{- end }} -{{- if .Values.csi.rbdPodLabels }} - - name: ROOK_CSI_RBD_POD_LABELS - value: {{ 
.Values.csi.rbdPodLabels | quote }} -{{- end }} -{{- if .Values.csi.provisionerTolerations }} - - name: CSI_PROVISIONER_TOLERATIONS - value: {{ toYaml .Values.csi.provisionerTolerations | quote }} -{{- end }} -{{- if .Values.csi.provisionerNodeAffinity }} - - name: CSI_PROVISIONER_NODE_AFFINITY - value: {{ .Values.csi.provisionerNodeAffinity }} -{{- end }} -{{- if .Values.csi.rbdProvisionerTolerations }} - - name: CSI_RBD_PROVISIONER_TOLERATIONS - value: {{ toYaml .Values.csi.rbdProvisionerTolerations | quote }} -{{- end }} -{{- if .Values.csi.rbdProvisionerNodeAffinity }} - - name: CSI_RBD_PROVISIONER_NODE_AFFINITY - value: {{ .Values.csi.rbdProvisionerNodeAffinity }} -{{- end }} -{{- if .Values.csi.cephFSProvisionerTolerations }} - - name: CSI_CEPHFS_PROVISIONER_TOLERATIONS - value: {{ toYaml .Values.csi.cephFSProvisionerTolerations | quote }} -{{- end }} -{{- if .Values.csi.cephFSProvisionerNodeAffinity }} - - name: CSI_CEPHFS_PROVISIONER_NODE_AFFINITY - value: {{ .Values.csi.cephFSProvisionerNodeAffinity }} -{{- end }} -{{- if .Values.csi.allowUnsupportedVersion }} - - name: ROOK_CSI_ALLOW_UNSUPPORTED_VERSION - value: {{ .Values.csi.allowUnsupportedVersion | quote }} -{{- end }} -{{- if .Values.csi.pluginTolerations }} - - name: CSI_PLUGIN_TOLERATIONS - value: {{ toYaml .Values.csi.pluginTolerations | quote }} -{{- end }} -{{- if .Values.csi.pluginNodeAffinity }} - - name: CSI_PLUGIN_NODE_AFFINITY - value: {{ .Values.csi.pluginNodeAffinity }} -{{- end }} -{{- if .Values.csi.rbdPluginTolerations }} - - name: CSI_RBD_PLUGIN_TOLERATIONS - value: {{ toYaml .Values.csi.rbdPluginTolerations | quote }} -{{- end }} -{{- if .Values.csi.rbdPluginNodeAffinity }} - - name: CSI_RBD_PLUGIN_NODE_AFFINITY - value: {{ .Values.csi.rbdPluginNodeAffinity }} -{{- end }} -{{- if .Values.csi.cephFSPluginTolerations }} - - name: CSI_CEPHFS_PLUGIN_TOLERATIONS - value: {{ toYaml .Values.csi.cephFSPluginTolerations | quote }} -{{- end }} -{{- if .Values.csi.cephFSPluginNodeAffinity }} - - name: CSI_CEPHFS_PLUGIN_NODE_AFFINITY - value: {{ .Values.csi.cephFSPluginNodeAffinity }} -{{- end }} -{{- if .Values.csi.cephfsGrpcMetricsPort }} - - name: CSI_CEPHFS_GRPC_METRICS_PORT - value: {{ .Values.csi.cephfsGrpcMetricsPort | quote }} -{{- end }} -{{- if .Values.csi.cephfsLivenessMetricsPort }} - - name: CSI_CEPHFS_LIVENESS_METRICS_PORT - value: {{ .Values.csi.cephfsLivenessMetricsPort | quote }} -{{- end }} -{{- if .Values.csi.rbdGrpcMetricsPort }} - - name: CSI_RBD_GRPC_METRICS_PORT - value: {{ .Values.csi.rbdGrpcMetricsPort | quote }} -{{- end }} -{{- if .Values.csi.rbdLivenessMetricsPort }} - - name: CSI_RBD_LIVENESS_METRICS_PORT - value: {{ .Values.csi.rbdLivenessMetricsPort | quote }} -{{- end }} -{{- if .Values.csi.forceCephFSKernelClient }} - - name: CSI_FORCE_CEPHFS_KERNEL_CLIENT - value: {{ .Values.csi.forceCephFSKernelClient | quote }} -{{- end }} -{{- if .Values.csi.logLevel }} - - name: CSI_LOG_LEVEL - value: {{ .Values.csi.logLevel | quote }} -{{- end }} -{{- if .Values.csi.csiRBDProvisionerResource }} - - name: CSI_RBD_PROVISIONER_RESOURCE - value: {{ .Values.csi.csiRBDProvisionerResource | quote }} -{{- end }} -{{- if .Values.csi.csiRBDPluginResource }} - - name: CSI_RBD_PLUGIN_RESOURCE - value: {{ .Values.csi.csiRBDPluginResource | quote }} -{{- end }} -{{- if .Values.csi.csiCephFSProvisionerResource }} - - name: CSI_CEPHFS_PROVISIONER_RESOURCE - value: {{ .Values.csi.csiCephFSProvisionerResource | quote }} -{{- end }} -{{- if .Values.csi.csiCephFSPluginResource }} - - name: CSI_CEPHFS_PLUGIN_RESOURCE - 
value: {{ .Values.csi.csiCephFSPluginResource | quote }} -{{- end }} -{{- end }} - - name: ROOK_ENABLE_FLEX_DRIVER - value: "{{ .Values.enableFlexDriver }}" - - name: ROOK_ENABLE_DISCOVERY_DAEMON - value: "{{ .Values.enableDiscoveryDaemon }}" - - name: ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS - value: "{{ .Values.cephCommandsTimeoutSeconds }}" - - name: ROOK_OBC_WATCH_OPERATOR_NAMESPACE - value: "{{ .Values.enableOBCWatchOperatorNamespace }}" - - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace -{{- if .Values.admissionController }} -{{- if .Values.admissionController.tolerations }} - - name: ADMISSION_CONTROLLER_TOLERATIONS - value: {{ toYaml .Values.admissionController.tolerations | quote }} -{{- end }} -{{- if .Values.admissionController.nodeAffinity }} - - name: ADMISSION_CONTROLLER_NODE_AFFINITY - value: {{ .Values.admissionController.nodeAffinity }} -{{- end }} -{{- end }} -{{- if .Values.unreachableNodeTolerationSeconds }} - - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS - value: {{ .Values.unreachableNodeTolerationSeconds | quote }} -{{- end }} -{{- if .Values.resources }} - resources: -{{ toYaml .Values.resources | indent 10 }} -{{- end }} -{{- if .Values.useOperatorHostNetwork }} - hostNetwork: true -{{- end }} -{{- if .Values.nodeSelector }} - nodeSelector: -{{ toYaml .Values.nodeSelector | indent 8 }} -{{- end }} -{{- if .Values.tolerations }} - tolerations: -{{ toYaml .Values.tolerations | indent 8 }} -{{- end }} -{{- if .Values.rbacEnable }} - serviceAccountName: rook-ceph-system -{{- end }} - volumes: - - name: rook-config - emptyDir: {} - - name: default-config-dir - emptyDir: {} diff --git a/cluster/charts/rook-ceph/templates/psp.yaml b/cluster/charts/rook-ceph/templates/psp.yaml deleted file mode 100644 index f0c74fd0b..000000000 --- a/cluster/charts/rook-ceph/templates/psp.yaml +++ /dev/null @@ -1,82 +0,0 @@ -{{- if .Values.pspEnable }} -# PSP for rook-ceph-operator - -# Most of the teams follow the kubernetes docs and have these PSPs. -# * privileged (for kube-system namespace) -# * restricted (for all logged in users) -# -# If we name it as `rook-ceph-operator`, it comes next to `restricted` PSP alphabetically, -# and applies `restricted` capabilities to `rook-system`. That's reason this is named with `00-rook-ceph-operator`, -# so it stays somewhere close to top and `rook-system` gets the intended PSP. 
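The operator Deployment template above wires Helm values straight into environment variables on the rook-ceph-operator container. A minimal sketch of a values override showing the shape of the keys that template reads (the image tag and resource figures are placeholders, not necessarily the chart defaults):

image:
  repository: rook/ceph
  tag: v1.6.0                      # placeholder tag
  pullPolicy: IfNotPresent
logLevel: INFO
currentNamespaceOnly: false
enableDiscoveryDaemon: false
cephCommandsTimeoutSeconds: 15     # placeholder value
csi:
  enableRbdDriver: true
  enableCephfsDriver: true
  enableGrpcMetrics: false
  logLevel: 0
resources:
  limits:
    cpu: 500m
    memory: 256Mi
  requests:
    cpu: 100m
    memory: 128Mi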
-# -# More info on PSP ordering : https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order - -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: 00-rook-ceph-operator - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' -spec: - privileged: true - allowedCapabilities: - # required by CSI - - SYS_ADMIN - # fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group - fsGroup: - rule: RunAsAny - # runAsUser, supplementalGroups - Rook needs to run some pods as root - # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time - runAsUser: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - # seLinux - seLinux context is unknown ahead of time; set if this is well-known - seLinux: - rule: RunAsAny - volumes: - # recommended minimum set - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - projected - # required for Rook - - hostPath - - flexVolume - # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known - # allowedHostPaths: - # - pathPrefix: "/run/udev" # for OSD prep - # readOnly: false - # - pathPrefix: "/dev" # for OSD prep - # readOnly: false - # - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to - # readOnly: false - # Ceph requires host IPC for setting up encrypted devices - hostIPC: true - # Ceph OSDs need to share the same PID namespace - hostPID: true - # hostNetwork can be set to 'false' if host networking isn't used - hostNetwork: true - hostPorts: - # Ceph messenger protocol v1 - - min: 6789 - max: 6790 # <- support old default port - # Ceph messenger protocol v2 - - min: 3300 - max: 3300 - # Ceph RADOS ports for OSDs, MDSes - - min: 6800 - max: 7300 - # # Ceph dashboard port HTTP (not recommended) - # - min: 7000 - # max: 7000 - # Ceph dashboard port HTTPS - - min: 8443 - max: 8443 - # Ceph mgr Prometheus Metrics - - min: 9283 - max: 9283 -{{- end }} diff --git a/cluster/charts/rook-ceph/templates/resources.yaml b/cluster/charts/rook-ceph/templates/resources.yaml deleted file mode 100644 index 76130ff31..000000000 --- a/cluster/charts/rook-ceph/templates/resources.yaml +++ /dev/null @@ -1,9954 +0,0 @@ -{{- if .Values.crds.enabled }} -{{- if semverCompare ">=1.16.0-0" .Capabilities.KubeVersion.GitVersion }} -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephblockpools.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephBlockPool - listKind: CephBlockPoolList - plural: cephblockpools - singular: cephblockpool - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephBlockPool represents a Ceph Storage Pool - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PoolSpec represents the spec of ceph pool - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - status: - description: CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool - properties: - info: - additionalProperties: - type: string - nullable: true - type: object - mirroringInfo: - description: MirroringInfoSpec is the status of the pool mirroring - properties: - details: - type: string - lastChanged: - type: string - lastChecked: - type: string - mode: - description: Mode is the mirroring mode - type: string - peers: - description: Peers are the list of peer sites connected to that cluster - items: - description: PeersSpec contains peer details - properties: - client_name: - description: ClientName is the CephX user used to connect to the peer - type: string - direction: - description: Direction is the peer mirroring direction - type: string - mirror_uuid: - description: MirrorUUID is the mirror UUID - 
type: string - site_name: - description: SiteName is the current site name - type: string - uuid: - description: UUID is the peer UUID - type: string - type: object - type: array - site_name: - description: SiteName is the current site name - type: string - type: object - mirroringStatus: - description: MirroringStatusSpec is the status of the pool mirroring - properties: - details: - description: Details contains potential status errors - type: string - lastChanged: - description: LastChanged is the last time time the status last changed - type: string - lastChecked: - description: LastChecked is the last time time the status was checked - type: string - summary: - description: Summary is the mirroring status summary - properties: - daemon_health: - description: DaemonHealth is the health of the mirroring daemon - type: string - health: - description: Health is the mirroring health - type: string - image_health: - description: ImageHealth is the health of the mirrored image - type: string - states: - description: States is the various state for all mirrored images - nullable: true - properties: - error: - description: Error is when the mirroring state is errored - type: integer - replaying: - description: Replaying is when the replay of the mirroring journal is on-going - type: integer - starting_replay: - description: StartingReplay is when the replay of the mirroring journal starts - type: integer - stopped: - description: Stopped is when the mirroring state is stopped - type: integer - stopping_replay: - description: StopReplaying is when the replay of the mirroring journal stops - type: integer - syncing: - description: Syncing is when the image is syncing - type: integer - unknown: - description: Unknown is when the mirroring state is unknown - type: integer - type: object - type: object - type: object - phase: - description: ConditionType represent a resource's status - type: string - snapshotScheduleStatus: - description: SnapshotScheduleStatusSpec is the status of the snapshot schedule - properties: - details: - description: Details contains potential status errors - type: string - lastChanged: - description: LastChanged is the last time time the status last changed - type: string - lastChecked: - description: LastChecked is the last time time the status was checked - type: string - snapshotSchedules: - description: SnapshotSchedules is the list of snapshots scheduled - items: - description: SnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool - properties: - image: - description: Image is the mirrored image - type: string - items: - description: Items is the list schedules times for a given snapshot - items: - description: SnapshotSchedule is a schedule - properties: - interval: - description: Interval is the interval in which snapshots will be taken - type: string - start_time: - description: StartTime is the snapshot starting time - type: string - type: object - type: array - namespace: - description: Namespace is the RADOS namespace the image is part of - type: string - pool: - description: Pool is the pool name - type: string - type: object - nullable: true - type: array - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - 
controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephclients.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephClient - listKind: CephClientList - plural: cephclients - singular: cephclient - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephClient represents a Ceph Client - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec represents the specification of a Ceph Client - properties: - caps: - additionalProperties: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - name: - type: string - required: - - caps - type: object - status: - description: Status represents the status of a Ceph Client - properties: - info: - additionalProperties: - type: string - nullable: true - type: object - phase: - description: ConditionType represent a resource's status - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephclusters.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephCluster - listKind: CephClusterList - plural: cephclusters - singular: cephcluster - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Directory used on the K8s nodes - jsonPath: .spec.dataDirHostPath - name: DataDirHostPath - type: string - - description: Number of MONs - jsonPath: .spec.mon.count - name: MonCount - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Phase - jsonPath: .status.phase - name: Phase - type: string - - description: Message - jsonPath: .status.message - name: Message - type: string - - description: Ceph Health - jsonPath: .status.ceph.health - name: Health - type: string - - jsonPath: .spec.external.enable - name: External - type: boolean - name: v1 - schema: - openAPIV3Schema: - description: CephCluster is a Ceph storage cluster - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec represents the specification of Ceph Cluster - properties: - annotations: - additionalProperties: - additionalProperties: - type: string - description: Annotations are annotations - type: object - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - cephVersion: - description: The version information that instructs Rook to orchestrate a particular version of Ceph. - nullable: true - properties: - allowUnsupported: - description: Whether to allow unsupported versions (do not set to true in production) - type: boolean - image: - description: Image is the container image used to launch the ceph daemons, such as quay.io/ceph/ceph: The full list of images can be found at https://quay.io/repository/ceph/ceph?tab=tags - type: string - type: object - cleanupPolicy: - description: Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster deletion is not imminent. - nullable: true - properties: - allowUninstallWithVolumes: - description: AllowUninstallWithVolumes defines whether we can proceed with the uninstall if they are RBD images still present - type: boolean - confirmation: - description: Confirmation represents the cleanup confirmation - nullable: true - pattern: ^$|^yes-really-destroy-data$ - type: string - sanitizeDisks: - description: SanitizeDisks represents way we sanitize disks - nullable: true - properties: - dataSource: - description: DataSource is the data source to use to sanitize the disk with - enum: - - zero - - random - type: string - iteration: - description: Iteration is the number of pass to apply the sanitizing - format: int32 - type: integer - method: - description: Method is the method we use to sanitize disks - enum: - - complete - - quick - type: string - type: object - type: object - continueUpgradeAfterChecksEvenIfNotHealthy: - description: ContinueUpgradeAfterChecksEvenIfNotHealthy defines if an upgrade should continue even if PGs are not clean - type: boolean - crashCollector: - description: A spec for the crash controller - nullable: true - properties: - daysToRetain: - description: DaysToRetain represents the number of days to retain crash until they get pruned - type: integer - disable: - description: Disable determines whether we should enable the crash collector - type: boolean - type: object - dashboard: - description: Dashboard settings - nullable: true - properties: - enabled: - description: Enabled determines whether to enable the dashboard - type: boolean - port: - description: Port is the dashboard webserver port - maximum: 65535 - minimum: 0 - type: integer - ssl: - description: SSL determines whether SSL should be used - type: boolean - urlPrefix: - description: URLPrefix is a prefix for all URLs to use the dashboard with a reverse proxy - type: string - type: object - dataDirHostPath: - description: The path on the host where config and data can be persisted - pattern: ^/(\S+) - type: string - disruptionManagement: - description: A spec for configuring disruption management. 
- nullable: true - properties: - machineDisruptionBudgetNamespace: - description: Namespace to look for MDBs by the machineDisruptionBudgetController - type: string - manageMachineDisruptionBudgets: - description: This enables management of machinedisruptionbudgets - type: boolean - managePodBudgets: - description: This enables management of poddisruptionbudgets - type: boolean - osdMaintenanceTimeout: - description: OSDMaintenanceTimeout sets how many additional minutes the DOWN/OUT interval is for drained failure domains it only works if managePodBudgets is true. the default is 30 minutes - format: int64 - type: integer - pgHealthCheckTimeout: - description: PGHealthCheckTimeout is the time (in minutes) that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. Rook will continue with the next drain if the timeout exceeds. It only works if managePodBudgets is true. No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. - format: int64 - type: integer - type: object - external: - description: Whether the Ceph Cluster is running external to this Kubernetes cluster mon, mgr, osd, mds, and discover daemons will not be created for external clusters. - nullable: true - properties: - enable: - description: Enable determines whether external mode is enabled or not - type: boolean - type: object - x-kubernetes-preserve-unknown-fields: true - healthCheck: - description: Internal daemon healthchecks and liveness probe - nullable: true - properties: - daemonHealth: - description: DaemonHealth is the health check for a given daemon - nullable: true - properties: - mon: - description: Monitor represents the health check settings for the Ceph monitor - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - osd: - description: ObjectStorageDaemon represents the health check settings for the Ceph OSDs - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - status: - description: Status represents the health check settings for the Ceph health - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - livenessProbe: - additionalProperties: - description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon - properties: - disabled: - description: Disabled determines whether probe is disable or not - type: boolean - probe: - description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. - properties: - exec: - description: One and only one of the following should be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. 
- format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - type: object - description: LivenessProbe allows to change the livenessprobe configuration for a given daemon - type: object - type: object - labels: - additionalProperties: - additionalProperties: - type: string - description: Labels are label for a given daemons - type: object - description: The labels-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - logCollector: - description: Logging represents loggings settings - nullable: true - properties: - enabled: - description: Enabled represents whether the log collector is enabled - type: boolean - periodicity: - description: Periodicity is the periodicity of the log rotation - type: string - type: object - mgr: - description: A spec for mgr related options - nullable: true - properties: - allowMultiplePerNode: - description: AllowMultiplePerNode allows to run multiple managers on the same node (not recommended) - type: boolean - count: - description: Count is the number of manager to run - maximum: 2 - minimum: 0 - type: integer - modules: - description: Modules is the list of ceph manager modules to enable/disable - items: - description: Module represents mgr modules that the user wants to enable or disable - properties: - enabled: - description: Enabled determines whether a module should be enabled or not - type: boolean - name: - description: Name is the name of the ceph manager module - type: string - type: object - nullable: true - type: array - type: object - mon: - description: A spec for mon related options - nullable: true - properties: - allowMultiplePerNode: - description: AllowMultiplePerNode determines if we can run multiple monitors on the same node (not recommended) - type: boolean - count: - description: Count is the number of Ceph monitors - minimum: 0 - type: integer - stretchCluster: - description: StretchCluster is the stretch cluster specification - properties: - failureDomainLabel: - description: 'FailureDomainLabel the failure domain name (e,g: zone)' - type: string - subFailureDomain: - description: SubFailureDomain is the failure domain within a zone - type: string - zones: - description: Zones is the list of zones - items: - description: StretchClusterZoneSpec represents the specification of a stretched zone in a Ceph Cluster - properties: - arbiter: - description: Arbiter determines if the zone contains the arbiter - type: boolean - name: - description: Name is the name of the zone - type: string - volumeClaimTemplate: - description: VolumeClaimTemplate is the PVC template - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. 
- type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - nullable: true - type: array - type: object - volumeClaimTemplate: - description: VolumeClaimTemplate is the PVC definition - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. - type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - monitoring: - description: Prometheus based Monitoring settings - nullable: true - properties: - enabled: - description: Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus types must exist or the creation will fail. - type: boolean - externalMgrEndpoints: - description: ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint - items: - description: EndpointAddress is a tuple that describes single IP address. - properties: - hostname: - description: The Hostname of this endpoint - type: string - ip: - description: 'The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. TODO: This should allow hostname or IP, See #4447.' - type: string - nodeName: - description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.' - type: string - targetRef: - description: Reference to object providing the endpoint. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). 
This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - type: object - required: - - ip - type: object - nullable: true - type: array - externalMgrPrometheusPort: - description: ExternalMgrPrometheusPort Prometheus exporter port - maximum: 65535 - minimum: 0 - type: integer - rulesNamespace: - description: RulesNamespace is the namespace where the prometheus rules and alerts should be created. If empty, the same namespace as the cluster will be used. - type: string - type: object - network: - description: Network related configuration - nullable: true - properties: - dualStack: - description: DualStack determines whether Ceph daemons should listen on both IPv4 and IPv6 - type: boolean - hostNetwork: - description: HostNetwork to enable host network - type: boolean - ipFamily: - default: IPv4 - description: IPFamily is the single stack IPv6 or IPv4 protocol - enum: - - IPv4 - - IPv6 - nullable: true - type: string - provider: - description: Provider is what provides network connectivity to the cluster e.g. "host" or "multus" - nullable: true - type: string - selectors: - additionalProperties: - type: string - description: Selectors string values describe what networks will be used to connect the cluster. Meanwhile the keys describe each network respective responsibilities or any metadata storage provider decide. - nullable: true - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - placement: - additionalProperties: - description: Placement is the placement for an object - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
- properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - description: The placement-related configuration to pass to kubernetes (affinity, node selector, tolerations). - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - priorityClassNames: - additionalProperties: - type: string - description: PriorityClassNames sets priority classes on components - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - removeOSDsIfOutAndSafeToRemove: - description: Remove the OSD that is out and safe to remove only if this option is true - type: boolean - resources: - additionalProperties: - description: ResourceRequirements describes the compute resource requirements. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - description: Resources set resource requests and limits - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - security: - description: Security represents security settings - nullable: true - properties: - kms: - description: KeyManagementService is the main Key Management option - nullable: true - properties: - connectionDetails: - additionalProperties: - type: string - description: ConnectionDetails contains the KMS connection details (address, port etc) - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - tokenSecretName: - description: TokenSecretName is the kubernetes secret containing the KMS token - type: string - type: object - type: object - skipUpgradeChecks: - description: SkipUpgradeChecks defines if an upgrade should be forced even if one of the check fails - type: boolean - storage: - description: A spec for available storage in the cluster and how it should be used - nullable: true - properties: - config: - additionalProperties: - type: string - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - deviceFilter: - description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster - type: string - devicePathFilter: - description: A regular expression to allow more fine-grained selection of devices with path names - type: string - devices: - description: List of devices to use as storage devices - items: - description: Device represents a disk to use in the cluster - properties: - config: - additionalProperties: - type: string - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - fullpath: - type: string - name: - type: string - type: object - nullable: true - type: array - x-kubernetes-preserve-unknown-fields: true - nodes: - items: - description: Node is a storage nodes - properties: - config: - additionalProperties: - type: string - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - deviceFilter: - description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster - type: string - devicePathFilter: - description: A regular expression to allow more fine-grained selection of devices with path names - type: string - devices: - description: List of devices to use as storage devices - items: - description: Device represents a disk to use in the cluster - properties: - config: - additionalProperties: - type: string - nullable: true - type: object - 
x-kubernetes-preserve-unknown-fields: true - fullpath: - type: string - name: - type: string - type: object - nullable: true - type: array - x-kubernetes-preserve-unknown-fields: true - name: - type: string - resources: - description: ResourceRequirements describes the compute resource requirements. - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - useAllDevices: - description: Whether to consume all the storage devices found on a machine - type: boolean - volumeClaimTemplates: - description: PersistentVolumeClaims to use as storage - items: - description: PersistentVolumeClaim is a user's request for and claim to a persistent volume - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. - type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. 
- type: string - type: object - type: object - type: array - type: object - nullable: true - type: array - onlyApplyOSDPlacement: - type: boolean - storageClassDeviceSets: - items: - description: StorageClassDeviceSet is a storage class device set - properties: - config: - additionalProperties: - type: string - description: Provider-specific device configuration - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - count: - description: Count is the number of devices in this set - minimum: 1 - type: integer - encrypted: - description: Whether to encrypt the deviceSet - type: boolean - name: - description: Name is a unique identifier for the set - type: string - placement: - description: Placement is the placement for an object - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
- type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. 
- type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - portable: - description: Portable represents OSD portability across the hosts - type: boolean - preparePlacement: - description: Placement is the placement for an object - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. 
- items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. 
- type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
- type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. 
- type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - resources: - description: ResourceRequirements describes the compute resource requirements. - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - schedulerName: - description: Scheduler name for OSD pod placement - type: string - tuneDeviceClass: - description: TuneSlowDeviceClass Tune the OSD when running on a slow Device Class - type: boolean - tuneFastDeviceClass: - description: TuneFastDeviceClass Tune the OSD when running on a fast Device Class - type: boolean - volumeClaimTemplates: - description: VolumeClaimTemplates is a list of PVC templates for the underlying storage devices - items: - description: PersistentVolumeClaim is a user's request for and claim to a persistent volume - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. 
- type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - type: array - required: - - count - - name - - volumeClaimTemplates - type: object - nullable: true - type: array - useAllDevices: - description: Whether to consume all the storage devices found on a machine - type: boolean - useAllNodes: - type: boolean - volumeClaimTemplates: - description: PersistentVolumeClaims to use as storage - items: - description: PersistentVolumeClaim is a user's request for and claim to a persistent volume - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. - type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - type: array - type: object - waitTimeoutForHealthyOSDInMinutes: - description: WaitTimeoutForHealthyOSDInMinutes defines the time the operator would wait before an OSD can be stopped for upgrade or restart. If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. The default wait timeout is 10 minutes. 
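# For reference, the storageClassDeviceSets schema above (count/name, placement with
# pod anti-affinity, tolerations and topologySpreadConstraints, resources, the tune*
# flags and PVC volumeClaimTemplates) corresponds to a CephCluster fragment roughly
# like the one below. Illustrative sketch only: it shows just the storage-related
# fields, and names such as set1, storage-node and gp2 are placeholders.
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  waitTimeoutForHealthyOSDInMinutes: 10
  storage:
    useAllNodes: false
    useAllDevices: false
    storageClassDeviceSets:
      - name: set1
        count: 3
        tuneFastDeviceClass: true
        placement:
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 100
                podAffinityTerm:
                  labelSelector:
                    matchExpressions:
                      - key: app
                        operator: In
                        values:
                          - rook-ceph-osd
                  topologyKey: kubernetes.io/hostname
          tolerations:
            - key: storage-node
              operator: Exists
              effect: NoSchedule
          topologySpreadConstraints:
            - maxSkew: 1
              topologyKey: topology.kubernetes.io/zone
              whenUnsatisfiable: ScheduleAnyway
              labelSelector:
                matchLabels:
                  app: rook-ceph-osd
        resources:
          requests:
            cpu: "1"
            memory: 4Gi
          limits:
            memory: 4Gi
        volumeClaimTemplates:
          - metadata:
              name: data
            spec:
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 100Gi
              storageClassName: gp2
              volumeMode: Block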
- format: int64 - type: integer - type: object - status: - description: ClusterStatus represents the status of a Ceph cluster - nullable: true - properties: - ceph: - description: CephStatus is the details health of a Ceph Cluster - properties: - capacity: - description: Capacity is the capacity information of a Ceph Cluster - properties: - bytesAvailable: - format: int64 - type: integer - bytesTotal: - format: int64 - type: integer - bytesUsed: - format: int64 - type: integer - lastUpdated: - type: string - type: object - details: - additionalProperties: - description: CephHealthMessage represents the health message of a Ceph Cluster - properties: - message: - type: string - severity: - type: string - required: - - message - - severity - type: object - type: object - health: - type: string - lastChanged: - type: string - lastChecked: - type: string - previousHealth: - type: string - versions: - description: CephDaemonsVersions show the current ceph version for different ceph daemons - properties: - cephfs-mirror: - additionalProperties: - type: integer - description: CephFSMirror shows CephFSMirror Ceph version - type: object - mds: - additionalProperties: - type: integer - description: Mds shows Mds Ceph version - type: object - mgr: - additionalProperties: - type: integer - description: Mgr shows Mgr Ceph version - type: object - mon: - additionalProperties: - type: integer - description: Mon shows Mon Ceph version - type: object - osd: - additionalProperties: - type: integer - description: Osd shows Osd Ceph version - type: object - overall: - additionalProperties: - type: integer - description: Overall shows overall Ceph version - type: object - rbd-mirror: - additionalProperties: - type: integer - description: RbdMirror shows RbdMirror Ceph version - type: object - rgw: - additionalProperties: - type: integer - description: Rgw shows Rgw Ceph version - type: object - type: object - type: object - conditions: - items: - description: Condition represents a status condition on any Rook-Ceph Custom Resource. 
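# The ClusterStatus schema above is populated by the operator rather than by the
# user; a reported status stanza on a CephCluster object might look roughly like
# this (all values are invented for illustration):
status:
  state: Created
  message: Cluster created successfully
  ceph:
    health: HEALTH_OK
    lastChecked: "2021-08-01T00:00:00Z"
    capacity:
      bytesTotal: 322122547200
      bytesUsed: 3221225472
      bytesAvailable: 318901321728
      lastUpdated: "2021-08-01T00:00:00Z"
    versions:
      overall:
        "ceph version 16.2.5 pacific (stable)": 4
  version:
    image: quay.io/ceph/ceph:v16.2.5
    version: 16.2.5-0
  storage:
    deviceClasses:
      - name: hdd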
- properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - description: ConditionReason is a reason for a condition - type: string - status: - type: string - type: - description: ConditionType represent a resource's status - type: string - type: object - type: array - message: - type: string - phase: - description: ConditionType represent a resource's status - type: string - state: - description: ClusterState represents the state of a Ceph Cluster - type: string - storage: - description: CephStorage represents flavors of Ceph Cluster Storage - properties: - deviceClasses: - items: - description: DeviceClasses represents device classes of a Ceph Cluster - properties: - name: - type: string - type: object - type: array - type: object - version: - description: ClusterVersion represents the version of a Ceph Cluster - properties: - image: - type: string - version: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephfilesystemmirrors.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystemMirror - listKind: CephFilesystemMirrorList - plural: cephfilesystemmirrors - singular: cephfilesystemmirror - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephFilesystemMirror is the Ceph Filesystem Mirror object definition - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: FilesystemMirroringSpec is the filesystem mirroring specification - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. - nullable: true - type: object - placement: - description: The affinity to place the rgw pods (default is to place on any available node) - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
- properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
- type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
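# The CephFilesystemMirror placement schema above accepts the standard Kubernetes
# node/pod (anti-)affinity terms; a minimal custom resource using it could look
# like the sketch below. Names such as my-fs-mirror, role=storage-node and the
# example.com annotation are placeholders, not values required by the CRD.
apiVersion: ceph.rook.io/v1
kind: CephFilesystemMirror
metadata:
  name: my-fs-mirror
  namespace: rook-ceph
spec:
  annotations:
    example.com/owner: storage-team
  labels:
    app.kubernetes.io/part-of: rook-ceph
  placement:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: role
                operator: In
                values:
                  - storage-node
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 50
          podAffinityTerm:
            labelSelector:
              matchLabels:
                app: rook-ceph-fs-mirror
            topologyKey: kubernetes.io/hostname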
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
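Taken together, the placement schema above admits stanzas such as the following sketch, which keeps mirror daemons apart, tolerates a hypothetical storage-node taint, and balances pods across zones; the app label value, taint key, and topology keys are illustrative assumptions:

  placement:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                app: rook-ceph-fs-mirror
            topologyKey: kubernetes.io/hostname
    tolerations:
      - key: storage-node
        operator: Exists
        effect: NoSchedule
    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            app: rook-ceph-fs-mirror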
- type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - priorityClassName: - description: PriorityClassName sets priority class on the cephfs-mirror pods - type: string - resources: - description: The resource requirements for the cephfs-mirror pods - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephfilesystems.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystem - listKind: CephFilesystemList - plural: cephfilesystems - singular: cephfilesystem - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Number of desired active MDS daemons - jsonPath: .spec.metadataServer.activeCount - name: ActiveMDS - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .status.phase - name: Phase - type: string - name: v1 - schema: - openAPIV3Schema: - description: CephFilesystem represents a Ceph Filesystem - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: FilesystemSpec represents the spec of a file system - properties: - dataPools: - description: The data pool settings - items: - description: PoolSpec represents the spec of ceph pool - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
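As a concrete reading of the PoolSpec fields above, a dataPools entry along these lines would request an erasure-coded data pool with compression left off; the device class and chunk counts are illustrative assumptions:

  dataPools:
    - failureDomain: host
      deviceClass: hdd
      compressionMode: none
      erasureCoded:
        dataChunks: 2
        codingChunks: 1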
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - nullable: true - type: array - metadataPool: - description: The metadata pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The 
algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. - type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms 
of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - metadataServer: - description: The mds pod info - properties: - activeCount: - description: The number of metadata servers that are active. The remaining servers in the cluster will be in standby mode. - format: int32 - maximum: 10 - minimum: 1 - type: integer - activeStandby: - description: Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. If false, standbys will still be available, but will not have a warm metadata cache. - type: boolean - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - placement: - description: The affinity to place the mds pods (default is to place on all available node) with a daemonset - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
- properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
- type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. 
Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' 
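The metadataServer block described above is where MDS spreading rules are typically expressed; a minimal sketch, assuming an app=rook-ceph-mds pod label, might be:

  metadataServer:
    activeCount: 1
    activeStandby: true
    placement:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: rook-ceph-mds
            topologyKey: kubernetes.io/hostname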
- format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - priorityClassName: - description: PriorityClassName sets priority classes on components - type: string - resources: - description: The resource requirements for the rgw pods - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - activeCount - type: object - mirroring: - description: The mirroring settings - nullable: true - properties: - enabled: - description: Enabled whether this filesystem is mirrored or not - type: boolean - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotRetention: - description: Retention is the retention policy for a snapshot schedule One path has exactly one retention policy. 
A policy can however contain multiple count-time period pairs in order to specify complex retention policies - items: - description: SnapshotScheduleRetentionSpec is a retention policy - properties: - duration: - description: Duration represents the retention duration for a snapshot - type: string - path: - description: Path is the path to snapshot - type: string - type: object - type: array - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored filesystems - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. - type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - preserveFilesystemOnDelete: - description: Preserve the fs in the cluster on CephFilesystem CR deletion. Setting this to true automatically implies PreservePoolsOnDelete is true. - type: boolean - preservePoolsOnDelete: - description: Preserve pools on filesystem deletion - type: boolean - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - dataPools - - metadataPool - - metadataServer - type: object - status: - description: CephFilesystemStatus represents the status of a Ceph Filesystem - properties: - info: - additionalProperties: - type: string - description: Use only info and put mirroringStatus in it? 
- nullable: true - type: object - mirroringStatus: - description: MirroringStatus is the filesystem mirroring status - properties: - daemonsStatus: - description: PoolMirroringStatus is the mirroring status of a filesystem - items: - description: FilesystemMirrorInfoSpec is the filesystem mirror status of a given filesystem - properties: - daemon_id: - description: DaemonID is the cephfs-mirror name - type: integer - filesystems: - description: Filesystems is the list of filesystems managed by a given cephfs-mirror daemon - items: - description: FilesystemsSpec is spec for the mirrored filesystem - properties: - directory_count: - description: DirectoryCount is the number of directories in the filesystem - type: integer - filesystem_id: - description: FilesystemID is the filesystem identifier - type: integer - name: - description: Name is name of the filesystem - type: string - peers: - description: Peers represents the mirroring peers - items: - description: FilesystemMirrorInfoPeerSpec is the specification of a filesystem peer mirror - properties: - remote: - description: Remote are the remote cluster information - properties: - client_name: - description: ClientName is cephx name - type: string - cluster_name: - description: ClusterName is the name of the cluster - type: string - fs_name: - description: FsName is the filesystem name - type: string - type: object - stats: - description: Stats are the stat a peer mirror - properties: - failure_count: - description: FailureCount is the number of mirroring failure - type: integer - recovery_count: - description: RecoveryCount is the number of recovery attempted after failures - type: integer - type: object - uuid: - description: UUID is the peer unique identifier - type: string - type: object - type: array - type: object - type: array - type: object - nullable: true - type: array - details: - description: Details contains potential status errors - type: string - lastChanged: - description: LastChanged is the last time time the status last changed - type: string - lastChecked: - description: LastChecked is the last time time the status was checked - type: string - type: object - phase: - description: ConditionType represent a resource's status - type: string - snapshotScheduleStatus: - description: FilesystemSnapshotScheduleStatusSpec is the status of the snapshot schedule - properties: - details: - description: Details contains potential status errors - type: string - lastChanged: - description: LastChanged is the last time time the status last changed - type: string - lastChecked: - description: LastChecked is the last time time the status was checked - type: string - snapshotSchedules: - description: SnapshotSchedules is the list of snapshots scheduled - items: - description: FilesystemSnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool - properties: - fs: - description: Fs is the name of the Ceph Filesystem - type: string - path: - description: Path is the path on the filesystem - type: string - rel_path: - type: string - retention: - description: FilesystemSnapshotScheduleStatusRetention is the retention specification for a filesystem snapshot schedule - properties: - active: - description: Active is whether the scheduled is active or not - type: boolean - created: - description: Created is when the snapshot schedule was created - type: string - created_count: - description: CreatedCount is total amount of snapshots - type: integer - first: - description: First is when the first snapshot schedule was taken - 
type: string - last: - description: Last is when the last snapshot schedule was taken - type: string - last_pruned: - description: LastPruned is when the last snapshot schedule was pruned - type: string - pruned_count: - description: PrunedCount is total amount of pruned snapshots - type: integer - start: - description: Start is when the snapshot schedule starts - type: string - type: object - schedule: - type: string - subvol: - description: Subvol is the name of the sub volume - type: string - type: object - nullable: true - type: array - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephnfses.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephNFS - listKind: CephNFSList - plural: cephnfses - shortNames: - - nfs - singular: cephnfs - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephNFS represents a Ceph NFS - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: NFSGaneshaSpec represents the spec of an nfs ganesha server - properties: - rados: - description: RADOS is the Ganesha RADOS specification - properties: - namespace: - description: Namespace is the RADOS namespace where NFS client recovery data is stored. - type: string - pool: - description: Pool is the RADOS pool where NFS client recovery data is stored. - type: string - required: - - namespace - - pool - type: object - server: - description: Server is the Ganesha Server specification - properties: - active: - description: The number of active Ganesha servers - type: integer - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. 
- nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - logLevel: - description: LogLevel set logging level - type: string - placement: - description: The affinity to place the ganesha pods - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
- format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
- items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - priorityClassName: - description: PriorityClassName sets the priority class on the pods - type: string - resources: - description: Resources set resource requests and limits - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - active - type: object - required: - - rados - - server - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephobjectrealms.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectRealm - listKind: CephObjectRealmList - plural: cephobjectrealms - singular: cephobjectrealm - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectRealm represents a Ceph Object Store Gateway Realm - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectRealmSpec represent the spec of an ObjectRealm - nullable: true - properties: - pull: - description: PullSpec represents the pulling specification of a Ceph Object Storage Gateway Realm - properties: - endpoint: - type: string - required: - - endpoint - type: object - required: - - pull - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephobjectstores.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStore - listKind: CephObjectStoreList - plural: cephobjectstores - singular: cephobjectstore - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectStore represents a Ceph Object Store Gateway - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectStoreSpec represent the spec of a pool - properties: - dataPool: - description: The data pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - gateway: - description: The rgw pod info - nullable: true - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - caBundleRef: - description: The name of the secret that stores custom ca-bundle with root and intermediate certificates. - nullable: true - type: string - externalRgwEndpoints: - description: ExternalRgwEndpoints points to external rgw endpoint(s) - items: - description: EndpointAddress is a tuple that describes single IP address. - properties: - hostname: - description: The Hostname of this endpoint - type: string - ip: - description: 'The IP of this endpoint. 
May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. TODO: This should allow hostname or IP, See #4447.' - type: string - nodeName: - description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.' - type: string - targetRef: - description: Reference to object providing the endpoint. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - type: object - required: - - ip - type: object - nullable: true - type: array - instances: - description: The number of pods in the rgw replicaset. - format: int32 - nullable: true - type: integer - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - placement: - description: The affinity to place the rgw pods (default is to place on any available node) - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. 
- items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. 
- type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - port: - description: The port the rgw service will be listening on (http) - format: int32 - type: integer - priorityClassName: - description: PriorityClassName sets priority classes on the rgw pods - type: string - resources: - description: The resource requirements for the rgw pods - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - securePort: - description: The port the rgw service will be listening on (https) - format: int32 - maximum: 65535 - minimum: 0 - nullable: true - type: integer - service: - description: The configuration related to add/set on each rgw service. - nullable: true - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each rgw service. nullable optional - type: object - type: object - sslCertificateRef: - description: The name of the secret that stores the ssl certificate for secure rgw connections - nullable: true - type: string - type: object - healthCheck: - description: The rgw Bucket healthchecks and liveness probe - nullable: true - properties: - bucket: - description: HealthCheckSpec represents the health check of an object store bucket - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - livenessProbe: - description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon - properties: - disabled: - description: Disabled determines whether probe is disable or not - type: boolean - probe: - description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. - properties: - exec: - description: One and only one of the following should be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - type: object - type: object - metadataPool: - description: The metadata pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - preservePoolsOnDelete: - description: Preserve pools on object store deletion - type: boolean - security: - description: Security represents security settings - nullable: true - properties: - kms: - description: KeyManagementService is the main Key Management option - nullable: true - properties: - connectionDetails: - additionalProperties: - type: string - description: ConnectionDetails contains the KMS connection details (address, port etc) - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - tokenSecretName: - description: TokenSecretName is the kubernetes secret containing the KMS token - type: string - type: object - type: object - zone: - description: The multisite info - nullable: true - properties: - name: - description: RGW Zone the Object Store is in - type: string - required: - - name - type: 
object - type: object - status: - description: ObjectStoreStatus represents the status of a Ceph Object Store resource - properties: - bucketStatus: - description: BucketStatus represents the status of a bucket - properties: - details: - type: string - health: - description: ConditionType represent a resource's status - type: string - lastChanged: - type: string - lastChecked: - type: string - type: object - conditions: - items: - description: Condition represents a status condition on any Rook-Ceph Custom Resource. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - description: ConditionReason is a reason for a condition - type: string - status: - type: string - type: - description: ConditionType represent a resource's status - type: string - type: object - type: array - info: - additionalProperties: - type: string - nullable: true - type: object - message: - type: string - phase: - description: ConditionType represent a resource's status - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephobjectstoreusers.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStoreUser - listKind: CephObjectStoreUserList - plural: cephobjectstoreusers - shortNames: - - rcou - - objectuser - singular: cephobjectstoreuser - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectStoreUser represents a Ceph Object Store Gateway User - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectStoreUserSpec represent the spec of an Objectstoreuser - properties: - displayName: - description: The display name for the ceph users - type: string - store: - description: The store the user will be created in - type: string - type: object - status: - description: ObjectStoreUserStatus represents the status Ceph Object Store Gateway User - properties: - info: - additionalProperties: - type: string - nullable: true - type: object - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephobjectzonegroups.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectZoneGroup - listKind: CephObjectZoneGroupList - plural: cephobjectzonegroups - singular: cephobjectzonegroup - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectZoneGroup represents a Ceph Object Store Gateway Zone Group - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup - properties: - realm: - description: The display name for the ceph users - type: string - required: - - realm - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephobjectzones.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectZone - listKind: CephObjectZoneList - plural: cephobjectzones - singular: cephobjectzone - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectZone represents a Ceph Object Store Gateway Zone - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectZoneSpec represent the spec of an ObjectZone - properties: - dataPool: - description: The data pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - metadataPool: - description: The metadata pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - 
type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. - type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the 
total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - zoneGroup: - description: The display name for the ceph users - type: string - required: - - dataPool - - metadataPool - - zoneGroup - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: cephrbdmirrors.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephRBDMirror - listKind: CephRBDMirrorList - plural: cephrbdmirrors - singular: cephrbdmirror - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephRBDMirror represents a Ceph RBD Mirror - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: RBDMirroringSpec represents the specification of an RBD mirror daemon - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - count: - description: Count represents the number of rbd mirror instance to run - minimum: 1 - type: integer - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. 
- nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - placement: - description: The affinity to place the rgw pods (default is to place on any available node) - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
- items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - priorityClassName: - description: PriorityClassName sets priority class on the rbd mirror pods - type: string - resources: - description: The resource requirements for the rbd mirror pods - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - count - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: objectbucketclaims.objectbucket.io - annotations: - helm.sh/resource-policy: keep -spec: - group: objectbucket.io - names: - kind: ObjectBucketClaim - listKind: ObjectBucketClaimList - plural: objectbucketclaims - singular: objectbucketclaim - shortNames: - - obc - - obcs - scope: Namespaced - versions: - - name: v1alpha1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - storageClassName: - type: string - bucketName: - type: string - generateBucketName: - type: string - additionalConfig: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - objectBucketName: - type: string - status: - type: object - x-kubernetes-preserve-unknown-fields: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: objectbuckets.objectbucket.io - annotations: - helm.sh/resource-policy: keep -spec: - group: objectbucket.io - names: - kind: ObjectBucket - listKind: ObjectBucketList - plural: objectbuckets - singular: objectbucket - shortNames: - - ob - - obs - scope: Cluster - versions: - - name: 
v1alpha1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - storageClassName: - type: string - endpoint: - type: object - nullable: true - properties: - bucketHost: - type: string - bucketPort: - type: integer - format: int32 - bucketName: - type: string - region: - type: string - subRegion: - type: string - additionalConfig: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - authentication: - type: object - nullable: true - items: - type: object - x-kubernetes-preserve-unknown-fields: true - additionalState: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - reclaimPolicy: - type: string - claimRef: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - type: object - x-kubernetes-preserve-unknown-fields: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: volumereplicationclasses.replication.storage.openshift.io -spec: - group: replication.storage.openshift.io - names: - kind: VolumeReplicationClass - listKind: VolumeReplicationClassList - plural: volumereplicationclasses - shortNames: - - vrc - singular: volumereplicationclass - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.provisioner - name: provisioner - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: VolumeReplicationClass is the Schema for the volumereplicationclasses API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VolumeReplicationClassSpec specifies parameters that an underlying storage system uses when creating a volume replica. A specific VolumeReplicationClass is used by specifying its name in a VolumeReplication object. 
- properties: - parameters: - additionalProperties: - type: string - description: Parameters is a key-value map with storage provisioner specific configurations for creating volume replicas - type: object - provisioner: - description: Provisioner is the name of storage provisioner - type: string - required: - - provisioner - type: object - status: - description: VolumeReplicationClassStatus defines the observed state of VolumeReplicationClass - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: volumereplications.replication.storage.openshift.io -spec: - group: replication.storage.openshift.io - names: - kind: VolumeReplication - listKind: VolumeReplicationList - plural: volumereplications - shortNames: - - vr - singular: volumereplication - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.volumeReplicationClass - name: volumeReplicationClass - type: string - - jsonPath: .spec.dataSource.name - name: pvcName - type: string - - jsonPath: .spec.replicationState - name: desiredState - type: string - - jsonPath: .status.state - name: currentState - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: VolumeReplication is the Schema for the volumereplications API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VolumeReplicationSpec defines the desired state of VolumeReplication - properties: - dataSource: - description: DataSource represents the object associated with the volume - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - replicationState: - description: ReplicationState represents the replication operation to be performed on the volume. 
Supported operations are "primary", "secondary" and "resync" - enum: - - primary - - secondary - - resync - type: string - volumeReplicationClass: - description: VolumeReplicationClass is the VolumeReplicationClass name for this VolumeReplication resource - type: string - required: - - dataSource - - replicationState - - volumeReplicationClass - type: object - status: - description: VolumeReplicationStatus defines the observed state of VolumeReplication - properties: - conditions: - description: Conditions are the list of conditions and their status. - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - lastCompletionTime: - format: date-time - type: string - lastStartTime: - format: date-time - type: string - message: - type: string - observedGeneration: - description: observedGeneration is the last generation change the operator has dealt with - format: int64 - type: integer - state: - description: State captures the latest state of the replication operation - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: volumes.rook.io -spec: - group: rook.io - names: - kind: Volume - listKind: VolumeList - plural: volumes - shortNames: - - rv - singular: volume - scope: Namespaced - versions: - - name: v1alpha2 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - attachments: - items: - properties: - clusterName: - type: string - mountDir: - type: string - node: - type: string - podName: - type: string - podNamespace: - type: string - readOnly: - type: boolean - required: - - clusterName - - mountDir - - node - - podName - - podNamespace - - readOnly - type: object - type: array - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - required: - - attachments - - metadata - type: object - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] -{{- else }} -################################################################################################################### -# Create the common resources that are necessary to start the operator and the ceph cluster. -# These resources *must* be created before the operator.yaml and cluster.yaml or their variants. -# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace. -# -# If the operator needs to manage multiple clusters (in different namespaces), see the section below -# for "cluster-specific resources". The resources below that section will need to be created for each namespace -# where the operator needs to manage the cluster. The resources above that section do not be created again. 
-# -# Most of the sections are prefixed with a 'OLM' keyword which is used to build our CSV for an OLM (Operator Life Cycle manager) -################################################################################################################### ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephclusters.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephCluster - listKind: CephClusterList - plural: cephclusters - singular: cephcluster - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - annotations: {} - cephVersion: - properties: - allowUnsupported: - type: boolean - image: - type: string - dashboard: - properties: - enabled: - type: boolean - urlPrefix: - type: string - port: - type: integer - minimum: 0 - maximum: 65535 - ssl: - type: boolean - dataDirHostPath: - pattern: ^/(\S+) - type: string - disruptionManagement: - properties: - machineDisruptionBudgetNamespace: - type: string - managePodBudgets: - type: boolean - osdMaintenanceTimeout: - type: integer - pgHealthCheckTimeout: - type: integer - manageMachineDisruptionBudgets: - type: boolean - skipUpgradeChecks: - type: boolean - continueUpgradeAfterChecksEvenIfNotHealthy: - type: boolean - waitTimeoutForHealthyOSDInMinutes: - type: integer - mon: - properties: - allowMultiplePerNode: - type: boolean - count: - maximum: 9 - minimum: 0 - type: integer - volumeClaimTemplate: {} - mgr: - properties: - count: - type: integer - minimum: 0 - maximum: 2 - modules: - items: - properties: - name: - type: string - enabled: - type: boolean - network: - properties: - hostNetwork: - type: boolean - provider: - type: string - selectors: {} - storage: - properties: - disruptionManagement: - properties: - machineDisruptionBudgetNamespace: - type: string - managePodBudgets: - type: boolean - osdMaintenanceTimeout: - type: integer - pgHealthCheckTimeout: - type: integer - manageMachineDisruptionBudgets: - type: boolean - useAllNodes: - type: boolean - nodes: - items: - properties: - name: - type: string - config: - properties: - metadataDevice: - type: string - storeType: - type: string - pattern: ^(bluestore)$ - databaseSizeMB: - type: string - walSizeMB: - type: string - journalSizeMB: - type: string - osdsPerDevice: - type: string - encryptedDevice: - type: string - pattern: ^(true|false)$ - useAllDevices: - type: boolean - deviceFilter: - type: string - devicePathFilter: - type: string - devices: - type: array - items: - properties: - name: - type: string - config: {} - resources: {} - useAllDevices: - type: boolean - deviceFilter: - type: string - devicePathFilter: - type: string - config: {} - storageClassDeviceSets: {} - monitoring: - properties: - enabled: - type: boolean - rulesNamespace: - type: string - externalMgrEndpoints: - type: array - items: - properties: - ip: - type: string - removeOSDsIfOutAndSafeToRemove: - type: boolean - external: - properties: - enable: - type: boolean - cleanupPolicy: - properties: - confirmation: - type: string - pattern: ^$|^yes-really-destroy-data$ - sanitizeDisks: - properties: - method: - type: string - pattern: ^(complete|quick)$ - dataSource: - type: string - pattern: ^(zero|random)$ - iteration: - type: integer - format: int32 - security: {} - logCollector: {} - placement: {} - resources: {} - healthCheck: {} - subresources: - status: {} - additionalPrinterColumns: - - name: DataDirHostPath - type: string - description: Directory used on the K8s nodes - JSONPath: 
.spec.dataDirHostPath - - name: MonCount - type: string - description: Number of MONs - JSONPath: .spec.mon.count - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - - name: Phase - type: string - description: Phase - JSONPath: .status.phase - - name: Message - type: string - description: Message - JSONPath: .status.message - - name: Health - type: string - description: Ceph Health - JSONPath: .status.ceph.health - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephclients.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephClient - listKind: CephClientList - plural: cephclients - singular: cephclient - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - caps: - type: object - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephrbdmirrors.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephRBDMirror - listKind: CephRBDMirrorList - plural: cephrbdmirrors - singular: cephrbdmirror - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - count: - type: integer - minimum: 1 - maximum: 100 - peers: - properties: - secretNames: - type: array - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephfilesystems.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystem - listKind: CephFilesystemList - plural: cephfilesystems - singular: cephfilesystem - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - metadataServer: - properties: - activeCount: - minimum: 1 - maximum: 10 - type: integer - activeStandby: - type: boolean - annotations: {} - placement: {} - resources: {} - metadataPool: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - minimum: 0 - maximum: 10 - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - minimum: 0 - maximum: 10 - type: integer - codingChunks: - minimum: 0 - maximum: 10 - type: integer - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - dataPools: - type: array - items: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - minimum: 0 - maximum: 10 - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - minimum: 0 - maximum: 10 - type: integer - codingChunks: - minimum: 0 - maximum: 10 - type: integer - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - parameters: - type: object - preservePoolsOnDelete: - type: boolean - preserveFilesystemOnDelete: - type: boolean - additionalPrinterColumns: - - name: ActiveMDS - type: string - description: Number of desired active MDS daemons - JSONPath: .spec.metadataServer.activeCount - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephnfses.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephNFS - listKind: CephNFSList - plural: cephnfses - singular: cephnfs - shortNames: - - nfs - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - 
properties: - rados: - properties: - pool: - type: string - namespace: - type: string - server: - properties: - active: - type: integer - annotations: {} - placement: {} - resources: {} - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectstores.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStore - listKind: CephObjectStoreList - plural: cephobjectstores - singular: cephobjectstore - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - gateway: - properties: - type: - type: string - sslCertificateRef: {} - port: - type: integer - minimum: 0 - maximum: 65535 - securePort: - type: integer - minimum: 0 - maximum: 65535 - instances: - type: integer - externalRgwEndpoints: - type: array - items: - properties: - ip: - type: string - annotations: {} - placement: {} - resources: {} - metadataPool: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - type: integer - codingChunks: - type: integer - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - parameters: - type: object - dataPool: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - type: integer - codingChunks: - type: integer - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - parameters: - type: object - preservePoolsOnDelete: - type: boolean - healthCheck: - properties: - bucket: - properties: - disabled: - type: boolean - interval: - type: string - timeout: - type: string - livenessProbe: - type: object - properties: - disabled: - type: boolean - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectstoreusers.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStoreUser - listKind: CephObjectStoreUserList - plural: cephobjectstoreusers - singular: cephobjectstoreuser - shortNames: - - rcou - - objectuser - scope: Namespaced - version: v1 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectrealms.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectRealm - listKind: CephObjectRealmList - plural: cephobjectrealms - singular: cephobjectrealm - scope: Namespaced - version: v1 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectzonegroups.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectZoneGroup - listKind: CephObjectZoneGroupList - plural: cephobjectzonegroups - singular: cephobjectzonegroup - scope: Namespaced - version: v1 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectzones.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectZone - listKind: CephObjectZoneList - plural: cephobjectzones - singular: cephobjectzone - scope: Namespaced - version: v1 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - 
name: cephblockpools.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephBlockPool - listKind: CephBlockPoolList - plural: cephblockpools - singular: cephblockpool - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - type: integer - minimum: 0 - maximum: 9 - targetSizeRatio: - type: number - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - type: integer - minimum: 0 - maximum: 9 - codingChunks: - type: integer - minimum: 0 - maximum: 9 - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics - for all RBD images in the pool - type: boolean - parameters: - type: object - mirroring: - properties: - enabled: - type: boolean - mode: - type: string - enum: - - image - - pool - peers: - properties: - secretNames: - type: array - snapshotSchedules: - type: object - properties: - interval: - type: string - startTime: - type: string - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: volumes.rook.io -spec: - group: rook.io - names: - kind: Volume - listKind: VolumeList - plural: volumes - singular: volume - shortNames: - - rv - scope: Namespaced - version: v1alpha2 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: objectbuckets.objectbucket.io -spec: - group: objectbucket.io - versions: - - name: v1alpha1 - served: true - storage: true - names: - kind: ObjectBucket - listKind: ObjectBucketList - plural: objectbuckets - singular: objectbucket - shortNames: - - ob - - obs - scope: Cluster - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: objectbucketclaims.objectbucket.io -spec: - versions: - - name: v1alpha1 - served: true - storage: true - group: objectbucket.io - names: - kind: ObjectBucketClaim - listKind: ObjectBucketClaimList - plural: objectbucketclaims - singular: objectbucketclaim - shortNames: - - obc - - obcs - scope: Namespaced - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephfilesystemmirrors.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystemMirror - listKind: CephFilesystemMirrorList - plural: cephfilesystemmirrors - singular: cephfilesystemmirror - scope: Namespaced - version: v1 - subresources: - status: {} - -{{- end }} -{{- end }} diff --git a/cluster/charts/rook-ceph/templates/role.yaml b/cluster/charts/rook-ceph/templates/role.yaml deleted file mode 100644 index 70f899c5d..000000000 --- a/cluster/charts/rook-ceph/templates/role.yaml +++ /dev/null @@ -1,192 +0,0 @@ -{{- if .Values.rbacEnable }} -# The role for the operator to manage resources in its own namespace -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: rook-ceph-system - namespace: {{ .Release.Namespace }} - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - resources: - - pods - - configmaps - - services - verbs: - - get - - list - - watch - - patch - - create - - update - - delete -- apiGroups: - - apps - - extensions - resources: - - daemonsets - - statefulsets - - deployments - verbs: - - get - - list - - watch - - 
create - - update - - delete -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - - prometheusrules - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - batch - resources: - - cronjobs - verbs: - - delete ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd - namespace: {{ .Release.Namespace }} -rules: -- apiGroups: [""] - resources: ["configmaps"] - verbs: [ "get", "list", "watch", "create", "update", "delete" ] -- apiGroups: ["ceph.rook.io"] - resources: ["cephclusters", "cephclusters/finalizers"] - verbs: [ "get", "list", "create", "update", "delete" ] ---- -# Aspects of ceph-mgr that operate within the cluster's namespace -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} -rules: -- apiGroups: - - "" - resources: - - pods - - services - - pods/log - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - batch - resources: - - jobs - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - ceph.rook.io - resources: - - "*" - verbs: - - "*" -- apiGroups: - - apps - resources: - - deployments/scale - - deployments - verbs: - - patch - - delete -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - delete ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: {{ .Release.Namespace }} -rules: -- apiGroups: - - "" - resources: - - pods - - configmaps - verbs: - - get - - list - - watch - - create - - update - - delete ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-external-provisioner-cfg - namespace: {{ .Release.Namespace }} -rules: - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "create", "delete"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-external-provisioner-cfg - namespace: {{ .Release.Namespace }} -rules: - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch", "create", "delete", "update"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get"] - - apiGroups: ["apps"] - resources: ["deployments"] - verbs: ["get", "delete" ] - - apiGroups: ["batch"] - resources: ["jobs"] - verbs: ["get", "list", "delete" ] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["delete"] -{{- end }} diff --git a/cluster/charts/rook-ceph/templates/rolebinding.yaml b/cluster/charts/rook-ceph/templates/rolebinding.yaml deleted file mode 100644 index aeda4636a..000000000 --- a/cluster/charts/rook-ceph/templates/rolebinding.yaml +++ /dev/null @@ -1,134 +0,0 @@ -{{- if .Values.rbacEnable }} -# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-system - 
namespace: {{ .Release.Namespace }} - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-system -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Release.Namespace }} ---- - # Allow the operator to create resources in this cluster's namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cluster-mgmt - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-cluster-mgmt -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Release.Namespace }} ---- -# Allow the osd pods in this namespace to work with configmaps -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-osd -subjects: -- kind: ServiceAccount - name: rook-ceph-osd - namespace: {{ .Release.Namespace }} ---- -# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-mgr -subjects: -- kind: ServiceAccount - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} ---- -# Allow the ceph mgr to access the rook system resources necessary for the mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-system - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-system -subjects: -- kind: ServiceAccount - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-cmd-reporter -subjects: -- kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: {{ .Release.Namespace }} ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-provisioner-role-cfg - namespace: {{ .Release.Namespace }} -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: cephfs-external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-provisioner-role-cfg - namespace: {{ .Release.Namespace }} -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: rbd-external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-purge-osd - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: rook-ceph-purge-osd - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/cluster/charts/rook-ceph/templates/serviceaccount.yaml b/cluster/charts/rook-ceph/templates/serviceaccount.yaml deleted file mode 100644 index 5d3687f67..000000000 --- a/cluster/charts/rook-ceph/templates/serviceaccount.yaml +++ /dev/null @@ -1,93 +0,0 @@ -# Service 
account for the operator -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-system - namespace: {{ .Release.Namespace }} - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{ template "imagePullSecrets" . }} ---- -# Service account for the Ceph OSDs. Must exist and cannot be renamed. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-osd - namespace: {{ .Release.Namespace }} - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{ template "imagePullSecrets" . }} ---- -# Service account for the Ceph Mgr. Must exist and cannot be renamed. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-mgr - namespace: {{ .Release.Namespace }} - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{ template "imagePullSecrets" . }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-cmd-reporter - namespace: {{ .Release.Namespace }} - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{ template "imagePullSecrets" . }} ---- -# Service account for the cephfs csi driver -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-cephfs-plugin-sa - namespace: {{ .Release.Namespace }} -{{ template "imagePullSecrets" . }} ---- -# Service account for the cephfs csi provisioner -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-cephfs-provisioner-sa - namespace: {{ .Release.Namespace }} -{{ template "imagePullSecrets" . }} ---- -# Service account for the rbd csi driver -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-rbd-plugin-sa - namespace: {{ .Release.Namespace }} -{{ template "imagePullSecrets" . }} ---- -# Service account for the rbd csi provisioner -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-rbd-provisioner-sa - namespace: {{ .Release.Namespace }} -{{ template "imagePullSecrets" . }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-admission-controller - namespace: {{ .Release.Namespace }} ---- -# Service account for the purge osd job -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-purge-osd - namespace: {{ .Release.Namespace }} -{{ template "imagePullSecrets" . }} ---- \ No newline at end of file diff --git a/cluster/charts/rook-ceph/values.yaml b/cluster/charts/rook-ceph/values.yaml deleted file mode 100644 index 9d7962a43..000000000 --- a/cluster/charts/rook-ceph/values.yaml +++ /dev/null @@ -1,370 +0,0 @@ -# Default values for rook-ceph-operator -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -image: - prefix: rook - repository: rook/ceph - tag: VERSION - pullPolicy: IfNotPresent - -crds: - # Whether the helm chart should create and update the CRDs. If false, the CRDs must be - # managed independently with cluster/examples/kubernetes/ceph/crds.yaml. - # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED. - # If the CRDs are deleted in this case, see the disaster recovery guide to restore them. 
- # https://rook.github.io/docs/rook/master/ceph-disaster-recovery.html#restoring-crds-after-deletion - enabled: true - -resources: - limits: - cpu: 500m - memory: 256Mi - requests: - cpu: 100m - memory: 128Mi - -nodeSelector: {} -# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`. -# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector -# disktype: ssd - -# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints -tolerations: [] - -# Delay to use in node.kubernetes.io/unreachable toleration -unreachableNodeTolerationSeconds: 5 - -# Whether rook watches its current namespace for CRDs or the entire cluster, defaults to false -currentNamespaceOnly: false - -## Annotations to be added to pod -annotations: {} - -## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL -logLevel: INFO - -## If true, create & use RBAC resources -## -rbacEnable: true - -## If true, create & use PSP resources -## -pspEnable: true - -## Settings for whether to disable the drivers or other daemons if they are not -## needed -csi: - enableRbdDriver: true - enableCephfsDriver: true - enableGrpcMetrics: false - # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary - # in some network configurations where the SDN does not provide access to an external cluster or - # there is significant drop in read/write performance. - # enableCSIHostNetwork: true - # set to false to disable deployment of snapshotter container in CephFS provisioner pod. - enableCephfsSnapshotter: true - # set to false to disable deployment of snapshotter container in RBD provisioner pod. - enableRBDSnapshotter: true - # (Optional) set user created priorityclassName for csi plugin pods. - # pluginPriorityClassName: system-node-critical - - # (Optional) set user created priorityclassName for csi provisioner pods. - # provisionerPriorityClassName: system-cluster-critical - - # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted. - # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - rbdFSGroupPolicy: "ReadWriteOnceWithFSType" - - # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. - # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - cephFSFSGroupPolicy: "None" - - # OMAP generator generates the omap mapping between the PV name and the RBD image - # which helps CSI to identify the rbd images for CSI operations. - # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature. - # By default OMAP generator is disabled and when enabled it will be deployed as a - # sidecar with CSI provisioner pod, to enable set it to true. - enableOMAPGenerator: false - - # Set logging level for csi containers. - # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. - #logLevel: 0 - # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. - # Default value is RollingUpdate. - #rbdPluginUpdateStrategy: OnDelete - # CSI Rbd plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. - # Default value is RollingUpdate. 
- #cephFSPluginUpdateStrategy: OnDelete - # Allow starting unsupported ceph-csi image - allowUnsupportedVersion: false - # (Optional) CEPH CSI RBD provisioner resource requirement list, Put here list of resource - # requests and limits you want to apply for provisioner pod - # csiRBDProvisionerResource: | - # - name : csi-provisioner - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-resizer - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-attacher - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-snapshotter - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-rbdplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI RBD plugin resource requirement list, Put here list of resource - # requests and limits you want to apply for plugin pod - # csiRBDPluginResource: | - # - name : driver-registrar - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # - name : csi-rbdplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI CephFS provisioner resource requirement list, Put here list of resource - # requests and limits you want to apply for provisioner pod - # csiCephFSProvisionerResource: | - # - name : csi-provisioner - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-resizer - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-attacher - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-cephfsplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI CephFS plugin resource requirement list, Put here list of resource - # requests and limits you want to apply for plugin pod - # csiCephFSPluginResource: | - # - name : driver-registrar - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # - name : csi-cephfsplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # Set provisonerTolerations and provisionerNodeAffinity for provisioner pod. - # The CSI provisioner would be best to start on the same nodes as other ceph daemons. - # provisionerTolerations: - # - key: key - # operator: Exists - # effect: NoSchedule - # provisionerNodeAffinity: key1=value1,value2; key2=value3 - # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods. 
- # The CSI plugins need to be started on all the nodes where the clients need to mount the storage. - # pluginTolerations: - # - key: key - # operator: Exists - # effect: NoSchedule - # pluginNodeAffinity: key1=value1,value2; key2=value3 - #cephfsGrpcMetricsPort: 9091 - #cephfsLivenessMetricsPort: 9081 - #rbdGrpcMetricsPort: 9090 - # Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS - # you may want to disable this setting. However, this will cause an issue during upgrades - # with the FUSE client. See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html - forceCephFSKernelClient: true - #rbdLivenessMetricsPort: 9080 - #kubeletDirPath: /var/lib/kubelet - #cephcsi: - #image: quay.io/cephcsi/cephcsi:v3.4.0 - #registrar: - #image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0 - #provisioner: - #image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2 - #snapshotter: - #image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1 - #attacher: - #image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1 - #resizer: - #image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0 - # Labels to add to the CSI CephFS Deployments and DaemonSets Pods. - #cephfsPodLabels: "key1=value1,key2=value2" - # Labels to add to the CSI RBD Deployments and DaemonSets Pods. - #rbdPodLabels: "key1=value1,key2=value2" - # Enable volume replication controller - volumeReplication: - enabled: false - #image: "quay.io/csiaddons/volumereplication-operator:v0.1.0" - -enableFlexDriver: false -enableDiscoveryDaemon: false -cephCommandsTimeoutSeconds: "15" - -# enable the ability to have multiple Ceph filesystems in the same cluster -# WARNING: Experimental feature in Ceph Releases Octopus (v15) and Nautilus (v14) -# https://docs.ceph.com/en/octopus/cephfs/experimental-features/#multiple-file-systems-within-a-ceph-cluster -allowMultipleFilesystems: false - -## if true, run rook operator on the host network -# useOperatorHostNetwork: true - -## Rook Agent configuration -## toleration: NoSchedule, PreferNoSchedule or NoExecute -## tolerationKey: Set this to the specific key of the taint to tolerate -## tolerations: Array of tolerations in YAML format which will be added to agent deployment -## nodeAffinity: Set to labels of the node to match -## flexVolumeDirPath: The path where the Rook agent discovers the flex volume plugins -## libModulesDirPath: The path where the Rook agent can find kernel modules -# agent: -# toleration: NoSchedule -# tolerationKey: key -# tolerations: -# - key: key -# operator: Exists -# effect: NoSchedule -# nodeAffinity: key1=value1,value2; key2=value3 -# mountSecurityMode: Any -## For information on FlexVolume path, please refer to https://rook.io/docs/rook/master/flexvolume.html -# flexVolumeDirPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ -# libModulesDirPath: /lib/modules -# mounts: mount1=/host/path:/container/path,/host/path2:/container/path2 - -## Rook Discover configuration -## toleration: NoSchedule, PreferNoSchedule or NoExecute -## tolerationKey: Set this to the specific key of the taint to tolerate -## tolerations: Array of tolerations in YAML format which will be added to agent deployment -## nodeAffinity: Set to labels of the node to match -# discover: -# toleration: NoSchedule -# tolerationKey: key -# tolerations: -# - key: key -# operator: Exists -# effect: NoSchedule -# nodeAffinity: key1=value1,value2; key2=value3 -# podLabels: "key1=value1,key2=value2" - -# In some situations SELinux relabelling breaks (times out) on large 
filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins). -# Disable it here if you have similar issues. -# For more details see https://github.com/rook/rook/issues/2417 -enableSelinuxRelabeling: true - -# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux, -# the pod must be running privileged in order to write to the hostPath volume, this must be set to true then. -hostpathRequiresPrivileged: false - -# Disable automatic orchestration when new devices are discovered. -disableDeviceHotplug: false - -# Blacklist certain disks according to the regex provided. -discoverDaemonUdev: - -# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts. -# imagePullSecrets: -# - name: my-registry-secret - -# Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used -enableOBCWatchOperatorNamespace: true - -admissionController: - # Set tolerations and nodeAffinity for admission controller pod. - # The admission controller would be best to start on the same nodes as other ceph daemons. - # tolerations: - # - key: key - # operator: Exists - # effect: NoSchedule - # nodeAffinity: key1=value1,value2; key2=value3 diff --git a/cluster/examples/kubernetes/ceph/ceph-client.yaml b/cluster/examples/kubernetes/ceph/ceph-client.yaml deleted file mode 100644 index b4d9baabd..000000000 --- a/cluster/examples/kubernetes/ceph/ceph-client.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -apiVersion: ceph.rook.io/v1 -kind: CephClient -metadata: - name: glance - namespace: rook-ceph # namespace:cluster -spec: - caps: - mon: 'profile rbd' - osd: 'profile rbd pool=images' ---- -apiVersion: ceph.rook.io/v1 -kind: CephClient -metadata: - name: cinder - namespace: rook-ceph # namespace:cluster -spec: - caps: - mon: 'profile rbd' - osd: 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images' diff --git a/cluster/examples/kubernetes/ceph/cluster-external-management.yaml b/cluster/examples/kubernetes/ceph/cluster-external-management.yaml deleted file mode 100644 index c8cd5f90b..000000000 --- a/cluster/examples/kubernetes/ceph/cluster-external-management.yaml +++ /dev/null @@ -1,22 +0,0 @@ -################################################################################################################# -# Define the settings for the rook-ceph-external cluster with common settings for a production cluster. 
- -# For example, if Rook is not managing any existing cluster in the 'rook-ceph' namespace do: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster-external.yaml - -# If there is already a cluster managed by Rook in 'rook-ceph' then run: -# kubectl create -f common-external.yaml -f cluster-external-management.yaml -################################################################################################################# -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph-external - namespace: rook-ceph-external # namespace:cluster -spec: - external: - enable: true - dataDirHostPath: /var/lib/rook - # providing an image is required, if you want to create other CRs (rgw, mds, nfs) - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 # Should match external cluster version diff --git a/cluster/examples/kubernetes/ceph/cluster-external.yaml b/cluster/examples/kubernetes/ceph/cluster-external.yaml deleted file mode 100644 index 9f386c321..000000000 --- a/cluster/examples/kubernetes/ceph/cluster-external.yaml +++ /dev/null @@ -1,33 +0,0 @@ -################################################################################################################# -# Define the settings for the rook-ceph-external cluster with common settings for a production cluster. - -# For example, if Rook is not managing any existing cluster in the 'rook-ceph' namespace do: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster-external.yaml - -# If there is already a cluster managed by Rook in 'rook-ceph' then do: -# kubectl create -f common-external.yaml -# kubectl create -f cluster-external.yaml -################################################################################################################# -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph-external - namespace: rook-ceph-external # namespace:cluster -spec: - external: - enable: true - crashCollector: - disable: true - healthCheck: - daemonHealth: - mon: - disabled: false - interval: 45s - # optionally, the ceph-mgr IP address can be passed to gather metric from the prometheus exporter - # monitoring: - # enabled: true - # rulesNamespace: rook-ceph - # externalMgrEndpoints: - #- ip: ip - # externalMgrPrometheusPort: 9283 diff --git a/cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml b/cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml deleted file mode 100644 index e8f814c1e..000000000 --- a/cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml +++ /dev/null @@ -1,262 +0,0 @@ -################################################################################################################# -# Define the settings for the rook-ceph cluster with common settings for a production cluster on top of bare metal. - -# This example expects three nodes, each with two available disks. Please modify it according to your environment. -# See the documentation for more details on storage settings available. 
- -# For example, to create the cluster: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster-on-local-pvc.yaml -################################################################################################################# -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: local-storage -provisioner: kubernetes.io/no-provisioner -volumeBindingMode: WaitForFirstConsumer ---- -kind: PersistentVolume -apiVersion: v1 -metadata: - name: local0-0 -spec: - storageClassName: local-storage - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - volumeMode: Block - local: - # If you want to use dm devices like logical volume, please replace `/dev/sdb` with their device names like `/dev/vg-name/lv-name`. - path: /dev/sdb - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - host0 ---- -kind: PersistentVolume -apiVersion: v1 -metadata: - name: local0-1 -spec: - storageClassName: local-storage - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - volumeMode: Block - local: - path: /dev/sdc - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - host0 ---- -kind: PersistentVolume -apiVersion: v1 -metadata: - name: local1-0 -spec: - storageClassName: local-storage - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - volumeMode: Block - local: - path: /dev/sdb - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - host1 ---- -kind: PersistentVolume -apiVersion: v1 -metadata: - name: local1-1 -spec: - storageClassName: local-storage - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - volumeMode: Block - local: - path: /dev/sdc - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - host1 ---- -kind: PersistentVolume -apiVersion: v1 -metadata: - name: local2-0 -spec: - storageClassName: local-storage - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - volumeMode: Block - local: - path: /dev/sdb - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - host2 ---- -kind: PersistentVolume -apiVersion: v1 -metadata: - name: local2-1 -spec: - storageClassName: local-storage - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - volumeMode: Block - local: - path: /dev/sdc - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - host2 ---- -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: false - volumeClaimTemplate: - spec: - storageClassName: local-storage - resources: - requests: - storage: 10Gi - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - allowUnsupported: false - skipUpgradeChecks: false - continueUpgradeAfterChecksEvenIfNotHealthy: false - mgr: - count: 1 - modules: - - name: pg_autoscaler - enabled: true - dashboard: - enabled: true - ssl: true - crashCollector: - disable: 
false - storage: - storageClassDeviceSets: - - name: set1 - count: 3 - portable: false - tuneDeviceClass: true - tuneFastDeviceClass: false - encrypted: false - placement: - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: ScheduleAnyway - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-osd - - rook-ceph-osd-prepare - preparePlacement: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-osd - - key: app - operator: In - values: - - rook-ceph-osd-prepare - topologyKey: kubernetes.io/hostname - resources: - # These are the OSD daemon limits. For OSD prepare limits, see the separate section below for "prepareosd" resources - # limits: - # cpu: "500m" - # memory: "4Gi" - # requests: - # cpu: "500m" - # memory: "4Gi" - volumeClaimTemplates: - - metadata: - name: data - # if you are looking at giving your OSD a different CRUSH device class than the one detected by Ceph - # annotations: - # crushDeviceClass: hybrid - spec: - resources: - requests: - storage: 10Gi - # IMPORTANT: Change the storage class depending on your environment - storageClassName: local-storage - volumeMode: Block - accessModes: - - ReadWriteOnce - # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement - onlyApplyOSDPlacement: false - resources: - # prepareosd: - # limits: - # cpu: "200m" - # memory: "200Mi" - # requests: - # cpu: "200m" - # memory: "200Mi" - disruptionManagement: - managePodBudgets: true - osdMaintenanceTimeout: 30 - pgHealthCheckTimeout: 0 - manageMachineDisruptionBudgets: false - machineDisruptionBudgetNamespace: openshift-machine-api diff --git a/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml b/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml deleted file mode 100644 index eb796fc74..000000000 --- a/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml +++ /dev/null @@ -1,200 +0,0 @@ -################################################################################################################# -# Define the settings for the rook-ceph cluster with common settings for a production cluster on top of cloud instances. -# At least three nodes are required in this example. See the documentation for more details on storage settings available. - -# For example, to create the cluster: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster-on-pvc.yaml -################################################################################################################# -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph # namespace:cluster -spec: - dataDirHostPath: /var/lib/rook - mon: - # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. - count: 3 - # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. - # Mons should only be allowed on the same node for test environments where data loss is acceptable. - allowMultiplePerNode: false - # A volume claim template can be specified in which case new monitors (and - # monitors created during fail over) will construct a PVC based on the - # template for the monitor's primary storage. Changes to the template do not - # affect existing monitors. Log data is stored on the HostPath under - # dataDirHostPath. 
If no storage requirement is specified, a default storage - # size appropriate for monitor data will be used. - volumeClaimTemplate: - spec: - storageClassName: gp2 - resources: - requests: - storage: 10Gi - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - allowUnsupported: false - skipUpgradeChecks: false - continueUpgradeAfterChecksEvenIfNotHealthy: false - mgr: - count: 1 - modules: - - name: pg_autoscaler - enabled: true - dashboard: - enabled: true - ssl: true - crashCollector: - disable: false - storage: - storageClassDeviceSets: - - name: set1 - # The number of OSDs to create from this device set - count: 3 - # IMPORTANT: If volumes specified by the storageClassName are not portable across nodes - # this needs to be set to false. For example, if using the local storage provisioner - # this should be false. - portable: true - # Certain storage class in the Cloud are slow - # Rook can configure the OSD running on PVC to accommodate that by tuning some of the Ceph internal - # Currently, "gp2" has been identified as such - tuneDeviceClass: true - # Certain storage class in the Cloud are fast - # Rook can configure the OSD running on PVC to accommodate that by tuning some of the Ceph internal - # Currently, "managed-premium" has been identified as such - tuneFastDeviceClass: false - # whether to encrypt the deviceSet or not - encrypted: false - # Since the OSDs could end up on any node, an effort needs to be made to spread the OSDs - # across nodes as much as possible. Unfortunately the pod anti-affinity breaks down - # as soon as you have more than one OSD per node. The topology spread constraints will - # give us an even spread on K8s 1.18 or newer. - placement: - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: ScheduleAnyway - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-osd - preparePlacement: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-osd - - key: app - operator: In - values: - - rook-ceph-osd-prepare - topologyKey: kubernetes.io/hostname - topologySpreadConstraints: - - maxSkew: 1 - # IMPORTANT: If you don't have zone labels, change this to another key such as kubernetes.io/hostname - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-osd-prepare - resources: - # These are the OSD daemon limits. 
For OSD prepare limits, see the separate section below for "prepareosd" resources - # limits: - # cpu: "500m" - # memory: "4Gi" - # requests: - # cpu: "500m" - # memory: "4Gi" - volumeClaimTemplates: - - metadata: - name: data - # if you are looking at giving your OSD a different CRUSH device class than the one detected by Ceph - # annotations: - # crushDeviceClass: hybrid - spec: - resources: - requests: - storage: 10Gi - # IMPORTANT: Change the storage class depending on your environment - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - # dedicated block device to store bluestore database (block.db) - # - metadata: - # name: metadata - # spec: - # resources: - # requests: - # # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing - # storage: 5Gi - # # IMPORTANT: Change the storage class depending on your environment - # storageClassName: io1 - # volumeMode: Block - # accessModes: - # - ReadWriteOnce - # dedicated block device to store bluestore wal (block.wal) - # - metadata: - # name: wal - # spec: - # resources: - # requests: - # # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing - # storage: 5Gi - # # IMPORTANT: Change the storage class depending on your environment - # storageClassName: io1 - # volumeMode: Block - # accessModes: - # - ReadWriteOnce - # Scheduler name for OSD pod placement - # schedulerName: osd-scheduler - # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement. - onlyApplyOSDPlacement: false - resources: - # prepareosd: - # limits: - # cpu: "200m" - # memory: "200Mi" - # requests: - # cpu: "200m" - # memory: "200Mi" - disruptionManagement: - managePodBudgets: true - osdMaintenanceTimeout: 30 - pgHealthCheckTimeout: 0 - manageMachineDisruptionBudgets: false - machineDisruptionBudgetNamespace: openshift-machine-api - # security oriented settings - # security: - # To enable the KMS configuration properly don't forget to uncomment the Secret at the end of the file - # kms: - # # name of the config map containing all the kms connection details - # connectionDetails: - # KMS_PROVIDER: "vault" - # VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e,g: https://vault.my-domain.com:8200 - # VAULT_BACKEND_PATH: "rook" - # VAULT_SECRET_ENGINE: "kv" - # # name of the secret containing the kms authentication token - # tokenSecretName: rook-vault-token -# UNCOMMENT THIS TO ENABLE A KMS CONNECTION -# Also, do not forget to replace both: -# * ROOK_TOKEN_CHANGE_ME: with a base64 encoded value of the token to use -# * VAULT_ADDR_CHANGE_ME: with the Vault address -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: rook-vault-token -# namespace: rook-ceph # namespace:cluster -# data: -# token: ROOK_TOKEN_CHANGE_ME diff --git a/cluster/examples/kubernetes/ceph/cluster-stretched-aws.yaml b/cluster/examples/kubernetes/ceph/cluster-stretched-aws.yaml deleted file mode 100644 index 34ae42e70..000000000 --- a/cluster/examples/kubernetes/ceph/cluster-stretched-aws.yaml +++ /dev/null @@ -1,136 +0,0 @@ -################################################################################################################# -# Define the settings for the rook-ceph cluster with common settings for a production cluster. -# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required -# in this example. See the documentation for more details on storage settings available. 
- -# For example, to create the cluster: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster-stretched.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph # namespace:cluster -spec: - dataDirHostPath: /var/lib/rook - mon: - # Five mons must be created for stretch mode - count: 5 - allowMultiplePerNode: false - stretchCluster: - # The ceph failure domain will be extracted from the label, which by default is the zone. The nodes running OSDs must have - # this label in order for the OSDs to be configured in the correct topology. For topology labels, see - # https://rook.github.io/docs/rook/master/ceph-cluster-crd.html#osd-topology. - failureDomainLabel: topology.kubernetes.io/zone - # The sub failure domain is the secondary level at which the data will be placed to maintain data durability and availability. - # The default is "host", which means that each OSD must be on a different node and you would need at least two nodes per zone. - # If the subFailureDomain is set to "osd", the OSDs would be allowed anywhere in the same zone including on the same node. - # If set to "rack" or some other intermediate failure domain, those labels would also need to be set on the nodes where - # the osds are started. - subFailureDomain: host - zones: - - name: us-east-2a - arbiter: true - - name: us-east-2b - - name: us-east-2c - volumeClaimTemplate: - spec: - storageClassName: gp2 - resources: - requests: - storage: 10Gi - mgr: - count: 2 - cephVersion: - # Stretch cluster support upstream is only available starting in Ceph Pacific - image: quay.io/ceph/ceph:v16.2.2 - allowUnsupported: true - skipUpgradeChecks: false - continueUpgradeAfterChecksEvenIfNotHealthy: false - dashboard: - enabled: true - ssl: true - storage: - useAllNodes: false - useAllDevices: false - deviceFilter: "" - storageClassDeviceSets: - - name: set1 - # The number of OSDs to create from this device set - count: 2 - portable: true - placement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - us-east-2b - preparePlacement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - us-east-2b - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - - name: set2 - # The number of OSDs to create from this device set - count: 2 - portable: true - placement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - us-east-2c - preparePlacement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - us-east-2c - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - storageClassName: gp2 - volumeMode: Block - accessModes: - - ReadWriteOnce - placement: - # The arbiter mon can have its own placement settings that will be different from the mons. 
- # If the arbiter section is not included in the placement, the arbiter will use the same placement - # settings as other mons. In this example, the arbiter has a toleration to run on a master node. - arbiter: - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - disruptionManagement: - managePodBudgets: true diff --git a/cluster/examples/kubernetes/ceph/cluster-stretched.yaml b/cluster/examples/kubernetes/ceph/cluster-stretched.yaml deleted file mode 100644 index d26ca53b1..000000000 --- a/cluster/examples/kubernetes/ceph/cluster-stretched.yaml +++ /dev/null @@ -1,74 +0,0 @@ -################################################################################################################# -# Define the settings for the rook-ceph cluster with common settings for a production cluster. -# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required -# in this example. See the documentation for more details on storage settings available. - -# For example, to create the cluster: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster-stretched.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph # namespace:cluster -spec: - dataDirHostPath: /var/lib/rook - mon: - # Five mons must be created for stretch mode - count: 5 - allowMultiplePerNode: false - stretchCluster: - # The ceph failure domain will be extracted from the label, which by default is the zone. The nodes running OSDs must have - # this label in order for the OSDs to be configured in the correct topology. For topology labels, see - # https://rook.github.io/docs/rook/master/ceph-cluster-crd.html#osd-topology. - failureDomainLabel: topology.kubernetes.io/zone - # The sub failure domain is the secondary level at which the data will be placed to maintain data durability and availability. - # The default is "host", which means that each OSD must be on a different node and you would need at least two nodes per zone. - # If the subFailureDomain is set to "osd", the OSDs would be allowed anywhere in the same zone including on the same node. - # If set to "rack" or some other intermediate failure domain, those labels would also need to be set on the nodes where - # the osds are started. - subFailureDomain: host - zones: - - name: a - arbiter: true - - name: b - - name: c - mgr: - count: 2 - cephVersion: - # Stretch cluster support upstream is only available starting in Ceph Pacific - image: quay.io/ceph/ceph:v16.2.5 - allowUnsupported: true - skipUpgradeChecks: false - continueUpgradeAfterChecksEvenIfNotHealthy: false - dashboard: - enabled: true - ssl: true - storage: - useAllNodes: true - useAllDevices: true - deviceFilter: "" - # OSD placement is expected to include the non-arbiter zones - placement: - # The arbiter mon can have its own placement settings that will be different from the mons. - # If the arbiter section is not included in the placement, the arbiter will use the same placement - # settings as other mons. In this example, the arbiter has a toleration to run on a master node. 
- arbiter: - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - osd: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - b - - c - disruptionManagement: - managePodBudgets: true diff --git a/cluster/examples/kubernetes/ceph/cluster-test.yaml b/cluster/examples/kubernetes/ceph/cluster-test.yaml deleted file mode 100644 index 9f602c5b2..000000000 --- a/cluster/examples/kubernetes/ceph/cluster-test.yaml +++ /dev/null @@ -1,53 +0,0 @@ -################################################################################################################# -# Define the settings for the rook-ceph cluster with common settings for a small test cluster. -# All nodes with available raw devices will be used for the Ceph cluster. One node is sufficient -# in this example. - -# For example, to create the cluster: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster-test.yaml -################################################################################################################# -kind: ConfigMap -apiVersion: v1 -metadata: - name: rook-config-override - namespace: rook-ceph # namespace:cluster -data: - config: | - [global] - osd_pool_default_size = 1 - mon_warn_on_pool_no_redundancy = false - bdev_flock_retry = 20 - bluefs_buffered_io = false ---- -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: my-cluster - namespace: rook-ceph # namespace:cluster -spec: - dataDirHostPath: /var/lib/rook - cephVersion: - image: quay.io/ceph/ceph:v16 - allowUnsupported: true - mon: - count: 1 - allowMultiplePerNode: true - mgr: - count: 1 - allowMultiplePerNode: true - dashboard: - enabled: true - crashCollector: - disable: true - storage: - useAllNodes: true - useAllDevices: true - #deviceFilter: - healthCheck: - daemonHealth: - mon: - interval: 45s - timeout: 600s - disruptionManagement: - managePodBudgets: true diff --git a/cluster/examples/kubernetes/ceph/cluster.yaml b/cluster/examples/kubernetes/ceph/cluster.yaml deleted file mode 100644 index cb2ac1ea7..000000000 --- a/cluster/examples/kubernetes/ceph/cluster.yaml +++ /dev/null @@ -1,273 +0,0 @@ -################################################################################################################# -# Define the settings for the rook-ceph cluster with common settings for a production cluster. -# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required -# in this example. See the documentation for more details on storage settings available. - -# For example, to create the cluster: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph # namespace:cluster -spec: - cephVersion: - # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). - # v14 is nautilus, v15 is octopus, and v16 is pacific. - # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different - # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. 
- # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v16.2.5-20210708 - # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v16.2.5 - # Whether to allow unsupported versions of Ceph. Currently `nautilus`, `octopus`, and `pacific` are supported. - # Future versions such as `pacific` would require this to be set to `true`. - # Do not set to true in production. - allowUnsupported: false - # The path on the host where configuration files will be persisted. Must be specified. - # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. - # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. - dataDirHostPath: /var/lib/rook - # Whether or not upgrade should continue even if a check fails - # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise - # Use at your OWN risk - # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades - skipUpgradeChecks: false - # Whether or not continue if PGs are not clean during an upgrade - continueUpgradeAfterChecksEvenIfNotHealthy: false - # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart. - # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one - # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would - # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. - # The default wait timeout is 10 minutes. - waitTimeoutForHealthyOSDInMinutes: 10 - mon: - # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. - count: 3 - # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. - # Mons should only be allowed on the same node for test environments where data loss is acceptable. - allowMultiplePerNode: false - mgr: - # When higher availability of the mgr is needed, increase the count to 2. - # In that case, one mgr will be active and one in standby. When Ceph updates which - # mgr is active, Rook will update the mgr services to match the active mgr. - count: 1 - modules: - # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules - # are already enabled by other settings in the cluster CR. - - name: pg_autoscaler - enabled: true - # enable the ceph dashboard for viewing cluster status - dashboard: - enabled: true - # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) - # urlPrefix: /ceph-dashboard - # serve the dashboard at the given port. - # port: 8443 - # serve the dashboard using SSL - ssl: true - # enable prometheus alerting for cluster - monitoring: - # requires Prometheus to be pre-installed - enabled: false - # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used. 
- # Recommended: - # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty. - # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus - # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions. - rulesNamespace: rook-ceph - network: - # enable host networking - #provider: host - # enable the Multus network provider - #provider: multus - #selectors: - # The selector keys are required to be `public` and `cluster`. - # Based on the configuration, the operator will do the following: - # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface - # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' - # - # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus - # - #public: public-conf --> NetworkAttachmentDefinition object name in Multus - #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus - # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 - #ipFamily: "IPv6" - # Ceph daemons to listen on both IPv4 and Ipv6 networks - #dualStack: false - # enable the crash collector for ceph daemon crash collection - crashCollector: - disable: false - # Uncomment daysToRetain to prune ceph crash entries older than the - # specified number of days. - #daysToRetain: 30 - # enable log collector, daemons will log on files and rotate - # logCollector: - # enabled: true - # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. - # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. - cleanupPolicy: - # Since cluster cleanup is destructive to data, confirmation is required. - # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". - # This value should only be set when the cluster is about to be deleted. After the confirmation is set, - # Rook will immediately stop configuring the cluster and only wait for the delete command. - # If the empty string is set, Rook will not destroy any data on hosts during uninstall. - confirmation: "" - # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion - sanitizeDisks: - # method indicates if the entire disk should be sanitized or simply ceph's metadata - # in both case, re-install is possible - # possible choices are 'complete' or 'quick' (default) - method: quick - # dataSource indicate where to get random bytes from to write on the disk - # possible choices are 'zero' (default) or 'random' - # using random sources will consume entropy from the system and will take much more time then the zero source - dataSource: zero - # iteration overwrite N times instead of the default (1) - # takes an integer value - iteration: 1 - # allowUninstallWithVolumes defines how the uninstall should be performed - # If set to true, cephCluster deletion does not wait for the PVs to be deleted. - allowUninstallWithVolumes: false - # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. 
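For reference against the cleanupPolicy schema above, a filled-in variant might look like the sketch below; the specific method, dataSource, and iteration values are illustrative choices, not defaults.

cleanupPolicy:
  # Set only when the cluster is about to be deleted; once the confirmation is set,
  # Rook stops configuring the cluster and only waits for the delete command.
  confirmation: "yes-really-destroy-data"
  sanitizeDisks:
    method: complete    # wipe the whole disk instead of only Ceph's metadata
    dataSource: random  # slower than "zero" and consumes system entropy
    iteration: 1
  allowUninstallWithVolumes: false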
- # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and - # tolerate taints with a key of 'storage-node'. -# placement: -# all: -# nodeAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# nodeSelectorTerms: -# - matchExpressions: -# - key: role -# operator: In -# values: -# - storage-node -# podAffinity: -# podAntiAffinity: -# topologySpreadConstraints: -# tolerations: -# - key: storage-node -# operator: Exists -# The above placement information can also be specified for mon, osd, and mgr components -# mon: -# Monitor deployments may contain an anti-affinity rule for avoiding monitor -# collocation on the same node. This is a required rule when host network is used -# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a -# preferred rule with weight: 50. -# osd: -# mgr: -# cleanup: - annotations: -# all: -# mon: -# osd: -# cleanup: -# prepareosd: -# If no mgr annotations are set, prometheus scrape annotations will be set by default. -# mgr: - labels: -# all: -# mon: -# osd: -# cleanup: -# mgr: -# prepareosd: -# monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. -# These labels can be passed as LabelSelector to Prometheus -# monitoring: - resources: -# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory -# mgr: -# limits: -# cpu: "500m" -# memory: "1024Mi" -# requests: -# cpu: "500m" -# memory: "1024Mi" -# The above example requests/limits can also be added to the other components -# mon: -# osd: -# For OSD it also is a possible to specify requests/limits based on device class -# osd-hdd: -# osd-ssd: -# osd-nvme: -# prepareosd: -# mgr-sidecar: -# crashcollector: -# logcollector: -# cleanup: - # The option to automatically remove OSDs that are out and are safe to destroy. - removeOSDsIfOutAndSafeToRemove: false -# priorityClassNames: -# all: rook-ceph-default-priority-class -# mon: rook-ceph-mon-priority-class -# osd: rook-ceph-osd-priority-class -# mgr: rook-ceph-mgr-priority-class - storage: # cluster level storage configuration and selection - useAllNodes: true - useAllDevices: true - #deviceFilter: - config: - # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map - # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. - # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB - # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller - # osdsPerDevice: "1" # this value can be overridden at the node or device level - # encryptedDevice: "true" # the default value for this option is "false" -# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named -# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. -# nodes: -# - name: "172.17.4.201" -# devices: # specific devices to use for storage can be specified for each node -# - name: "sdb" -# - name: "nvme01" # multiple osds can be created on high performance devices -# config: -# osdsPerDevice: "5" -# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths -# config: # configuration can be specified at the node level which overrides the cluster level config -# - name: "172.17.4.301" -# deviceFilter: "^sd." 
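The monitor anti-affinity behaviour described in the placement comments above can also be written out explicitly. A minimal sketch, assuming the mon pods carry the app: rook-ceph-mon label that Rook applies to monitor deployments:

placement:
  mon:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values:
                  - rook-ceph-mon
          topologyKey: kubernetes.io/hostname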
- # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd - onlyApplyOSDPlacement: false - # The section for configuring management of daemon disruptions during upgrade or fencing. - disruptionManagement: - # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically - # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will - # block eviction of OSDs by default and unblock them safely when drains are detected. - managePodBudgets: true - # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the - # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. - osdMaintenanceTimeout: 30 - # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. - # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. - # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. - pgHealthCheckTimeout: 0 - # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. - # Only available on OpenShift. - manageMachineDisruptionBudgets: false - # Namespace in which to watch for the MachineDisruptionBudgets. - machineDisruptionBudgetNamespace: openshift-machine-api - - # healthChecks - # Valid values for daemons are 'mon', 'osd', 'status' - healthCheck: - daemonHealth: - mon: - disabled: false - interval: 45s - osd: - disabled: false - interval: 60s - status: - disabled: false - interval: 60s - # Change pod liveness probe, it works for all mon,mgr,osd daemons - livenessProbe: - mon: - disabled: false - mgr: - disabled: false - osd: - disabled: false diff --git a/cluster/examples/kubernetes/ceph/common-external.yaml b/cluster/examples/kubernetes/ceph/common-external.yaml deleted file mode 100644 index 04fbd5b24..000000000 --- a/cluster/examples/kubernetes/ceph/common-external.yaml +++ /dev/null @@ -1,77 +0,0 @@ -################################################################################################################### -# Create the common resources that are necessary to start start an external Ceph cluster in a different namespace -# These resources can be created after an operator that is already running but assumes common.yaml has been injected -# The samples all assume that your existing operator running "rook-ceph" namespace will also watch and have permissions -# to interact with an external cluster configured in "rook-ceph-external" cluster. 
-# -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -f common-external.yaml -# -# If there is no cluster managed by the current Rook Operator -# you can simply replace all occurrence of rook-ceph-external with rook-ceph -# -# And remove the following code: -# -# apiVersion: v1 -# kind: Namespace -# metadata: -# name: rook-ceph-external -# -# Then kubectl create -f cluster-external.yaml -################################################################################################################### -apiVersion: v1 -kind: Namespace -metadata: - name: rook-ceph-external # namespace:cluster ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cluster-mgmt - namespace: rook-ceph-external # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-cluster-mgmt -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph-external # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-cmd-reporter -subjects: - - kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: rook-ceph-external # namespace:cluster ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph-external # namespace:cluster ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph-external # namespace:cluster -rules: - - apiGroups: - - "" - resources: - - pods - - configmaps - verbs: - - get - - list - - watch - - create - - update - - delete diff --git a/cluster/examples/kubernetes/ceph/common-second-cluster.yaml b/cluster/examples/kubernetes/ceph/common-second-cluster.yaml deleted file mode 100644 index cbe12e533..000000000 --- a/cluster/examples/kubernetes/ceph/common-second-cluster.yaml +++ /dev/null @@ -1,169 +0,0 @@ -# This is a template to generate the necessary RBAC to deploy a second cluster into a namespace -# It assumes that common.yaml already ran -# -# Run me like: -# NAMESPACE=rook-ceph-secondary envsubst < tests/manifests/common-cluster-secondary.yaml | kubectl create -f - ---- -apiVersion: v1 -kind: Namespace -metadata: - name: $NAMESPACE ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cluster-mgmt - namespace: $NAMESPACE -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-cluster-mgmt -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: $NAMESPACE -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-cmd-reporter -subjects: - - kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: $NAMESPACE ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-cmd-reporter - namespace: $NAMESPACE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: $NAMESPACE -rules: - - apiGroups: - - "" - resources: - - pods - - configmaps - verbs: - - get - - list - - watch - - create - - update - - delete ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd - namespace: $NAMESPACE -rules: - - apiGroups: [""] - resources: 
["configmaps"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: ["ceph.rook.io"] - resources: ["cephclusters", "cephclusters/finalizers"] - verbs: ["get", "list", "create", "update", "delete"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd-external -rules: - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd-external -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-osd-external -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: $NAMESPACE ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd-external - namespace: $NAMESPACE -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: $NAMESPACE ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-mgr - namespace: $NAMESPACE ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-osd - namespace: $NAMESPACE ---- -# Aspects of ceph osd purge job that require access to the operator/cluster namespace -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd - namespace: $NAMESPACE -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get"] - - apiGroups: ["apps"] - resources: ["deployments"] - verbs: ["get", "delete"] - - apiGroups: ["batch"] - resources: ["jobs"] - verbs: ["get", "list", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["delete"] ---- -# Allow the osd purge job to run in this namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd - namespace: $NAMESPACE -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-purge-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-purge-osd - namespace: $NAMESPACE ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-purge-osd - namespace: $NAMESPACE diff --git a/cluster/examples/kubernetes/ceph/common.yaml b/cluster/examples/kubernetes/ceph/common.yaml deleted file mode 100644 index aed387d5e..000000000 --- a/cluster/examples/kubernetes/ceph/common.yaml +++ /dev/null @@ -1,1256 +0,0 @@ -################################################################################################################### -# Create the common resources that are necessary to start the operator and the ceph cluster. -# These resources *must* be created before the operator.yaml and cluster.yaml or their variants. -# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace. -# -# If the operator needs to manage multiple clusters (in different namespaces), see the section below -# for "cluster-specific resources". The resources below that section will need to be created for each namespace -# where the operator needs to manage the cluster. The resources above that section do not be created again. 
-# -# Most of the sections are prefixed with a 'OLM' keyword which is used to build our CSV for an OLM (Operator Life Cycle manager) -################################################################################################################### - -# Namespace where the operator and other rook resources are created -apiVersion: v1 -kind: Namespace -metadata: - name: rook-ceph # namespace:cluster -# OLM: BEGIN OBJECTBUCKET ROLEBINDING ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-object-bucket -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-object-bucket -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator -# OLM: END OBJECTBUCKET ROLEBINDING -# OLM: BEGIN OPERATOR ROLE ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-admission-controller - namespace: rook-ceph # namespace:operator ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-admission-controller-role -rules: - - apiGroups: ["ceph.rook.io"] - resources: ["*"] - verbs: ["get", "watch", "list"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-admission-controller-rolebinding -subjects: - - kind: ServiceAccount - name: rook-ceph-admission-controller - apiGroup: "" - namespace: rook-ceph # namespace:operator -roleRef: - kind: ClusterRole - name: rook-ceph-admission-controller-role - apiGroup: rbac.authorization.k8s.io ---- -# The cluster role for managing all the cluster-specific resources in a namespace -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-ceph-cluster-mgmt - labels: - operator: rook - storage-backend: ceph -rules: - - apiGroups: - - "" - - apps - - extensions - resources: - - secrets - - pods - - pods/log - - services - - configmaps - - deployments - - daemonsets - verbs: - - get - - list - - watch - - patch - - create - - update - - delete ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-ceph-system - labels: - operator: rook - storage-backend: ceph -rules: - # Most resources are represented by a string representation of their name, such as “pods”, just as it appears in the URL for the relevant API endpoint. - # However, some Kubernetes APIs involve a “subresource”, such as the logs for a pod. [...] - # To represent this in an RBAC role, use a slash to delimit the resource and subresource. 
- # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources - - apiGroups: [""] - resources: ["pods", "pods/log"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["pods/exec"] - verbs: ["create"] ---- -# The role for the operator to manage resources in its own namespace -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: rook-ceph-system - namespace: rook-ceph # namespace:operator - labels: - operator: rook - storage-backend: ceph -rules: - - apiGroups: - - "" - resources: - - pods - - configmaps - - services - verbs: - - get - - list - - watch - - patch - - create - - update - - delete - - apiGroups: - - apps - - extensions - resources: - - daemonsets - - statefulsets - - deployments - verbs: - - get - - list - - watch - - create - - update - - delete - - apiGroups: - - batch - resources: - - cronjobs - verbs: - - delete ---- -# The cluster role for managing the Rook CRDs -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-ceph-global - labels: - operator: rook - storage-backend: ceph -rules: - - apiGroups: - - "" - resources: - # Pod access is needed for fencing - - pods - # Node access is needed for determining nodes where mons should run - - nodes - - nodes/proxy - - services - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - # PVs and PVCs are managed by the Rook provisioner - - persistentvolumes - - persistentvolumeclaims - - endpoints - verbs: - - get - - list - - watch - - patch - - create - - update - - delete - - apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch - - apiGroups: - - batch - resources: - - jobs - - cronjobs - verbs: - - get - - list - - watch - - create - - update - - delete - - apiGroups: - - ceph.rook.io - resources: - - "*" - verbs: - - "*" - - apiGroups: - - rook.io - resources: - - "*" - verbs: - - "*" - - apiGroups: - - policy - - apps - - extensions - resources: - # This is for the clusterdisruption controller - - poddisruptionbudgets - # This is for both clusterdisruption and nodedrain controllers - - deployments - - replicasets - verbs: - - "*" - - apiGroups: - - healthchecking.openshift.io - resources: - - machinedisruptionbudgets - verbs: - - get - - list - - watch - - create - - update - - delete - - apiGroups: - - machine.openshift.io - resources: - - machines - verbs: - - get - - list - - watch - - create - - update - - delete - - apiGroups: - - storage.k8s.io - resources: - - csidrivers - verbs: - - create - - delete - - get - - update - - apiGroups: - - k8s.cni.cncf.io - resources: - - network-attachment-definitions - verbs: - - get ---- -# Aspects of ceph-mgr that require cluster-wide access -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-cluster - labels: - operator: rook - storage-backend: ceph -rules: - - apiGroups: - - "" - resources: - - configmaps - - nodes - - nodes/proxy - - persistentvolumes - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list - - get - - watch - - apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-object-bucket - labels: - operator: rook - storage-backend: ceph -rules: - - apiGroups: - - "" - verbs: - - "*" - resources: - - secrets - - configmaps - - apiGroups: - - storage.k8s.io - resources: - - 
storageclasses - verbs: - - get - - list - - watch - - apiGroups: - - "objectbucket.io" - verbs: - - "*" - resources: - - "*" -# OLM: END OPERATOR ROLE -# OLM: BEGIN SERVICE ACCOUNT SYSTEM ---- -# The rook system service account used by the operator, agent, and discovery pods -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-system - namespace: rook-ceph # namespace:operator - labels: - operator: rook - storage-backend: ceph -# imagePullSecrets: -# - name: my-registry-secret - -# OLM: END SERVICE ACCOUNT SYSTEM -# OLM: BEGIN OPERATOR ROLEBINDING ---- -# Grant the operator, agent, and discovery agents access to resources in the namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-system - namespace: rook-ceph # namespace:operator - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-system -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-system - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-system -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator ---- -# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-global - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-global -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator -# OLM: END OPERATOR ROLEBINDING -################################################################################################################# -# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph" -# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles -# and bindings accordingly. -################################################################################################################# -# Service account for the Ceph OSDs. Must exist and cannot be renamed. -# OLM: BEGIN SERVICE ACCOUNT OSD ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster -# imagePullSecrets: -# - name: my-registry-secret - -# OLM: END SERVICE ACCOUNT OSD -# OLM: BEGIN SERVICE ACCOUNT MGR ---- -# Service account for the Ceph Mgr. Must exist and cannot be renamed. 
-apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster -# imagePullSecrets: -# - name: my-registry-secret - -# OLM: END SERVICE ACCOUNT MGR -# OLM: BEGIN CMD REPORTER SERVICE ACCOUNT ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph # namespace:cluster -# OLM: END CMD REPORTER SERVICE ACCOUNT -# OLM: BEGIN CLUSTER ROLE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: ["ceph.rook.io"] - resources: ["cephclusters", "cephclusters/finalizers"] - verbs: ["get", "list", "create", "update", "delete"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd -rules: - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list ---- -# Aspects of ceph-mgr that require access to the system namespace -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-system -rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch ---- -# Aspects of ceph-mgr that operate within the cluster's namespace -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster -rules: - - apiGroups: - - "" - resources: - - pods - - services - - pods/log - verbs: - - get - - list - - watch - - create - - update - - delete - - apiGroups: - - batch - resources: - - jobs - verbs: - - get - - list - - watch - - create - - update - - delete - - apiGroups: - - ceph.rook.io - resources: - - "*" - verbs: - - "*" - - apiGroups: - - apps - resources: - - deployments/scale - - deployments - verbs: - - patch - - delete - - apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - delete -# OLM: END CLUSTER ROLE -# OLM: BEGIN CMD REPORTER ROLE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph # namespace:cluster -rules: - - apiGroups: - - "" - resources: - - pods - - configmaps - verbs: - - get - - list - - watch - - create - - update - - delete -# OLM: END CMD REPORTER ROLE -# OLM: BEGIN CLUSTER ROLEBINDING ---- -# Allow the operator to create resources in this cluster's namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cluster-mgmt - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-cluster-mgmt -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator ---- -# Allow the osd pods in this namespace to work with configmaps -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster ---- -# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-mgr -subjects: - - 
kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster ---- -# Allow the ceph mgr to access the rook system resources necessary for the mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-system - namespace: rook-ceph # namespace:operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-system -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster ---- -# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-cluster -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster - ---- -# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster - -# OLM: END CLUSTER ROLEBINDING -# OLM: BEGIN CMD REPORTER ROLEBINDING ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-cmd-reporter -subjects: - - kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: rook-ceph # namespace:cluster -# OLM: END CMD REPORTER ROLEBINDING -################################################################################################################# -# Beginning of pod security policy resources. The example will assume the cluster will be created in the -# "rook-ceph" namespace. If you want to create the cluster in a different namespace, you will need to modify -# the roles and bindings accordingly. -################################################################################################################# -# OLM: BEGIN CLUSTER POD SECURITY POLICY ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - # Note: Kubernetes matches PSPs to deployments alphabetically. In some environments, this PSP may - # need to be renamed with a value that will match before others. 
- name: 00-rook-privileged - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: "runtime/default" - seccomp.security.alpha.kubernetes.io/defaultProfileName: "runtime/default" -spec: - privileged: true - allowedCapabilities: - # required by CSI - - SYS_ADMIN - # fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group - fsGroup: - rule: RunAsAny - # runAsUser, supplementalGroups - Rook needs to run some pods as root - # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time - runAsUser: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - # seLinux - seLinux context is unknown ahead of time; set if this is well-known - seLinux: - rule: RunAsAny - volumes: - # recommended minimum set - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - projected - # required for Rook - - hostPath - - flexVolume - # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known - # allowedHostPaths: - # - pathPrefix: "/run/udev" # for OSD prep - # readOnly: false - # - pathPrefix: "/dev" # for OSD prep - # readOnly: false - # - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to - # readOnly: false - # Ceph requires host IPC for setting up encrypted devices - hostIPC: true - # Ceph OSDs need to share the same PID namespace - hostPID: true - # hostNetwork can be set to 'false' if host networking isn't used - hostNetwork: true - hostPorts: - # Ceph messenger protocol v1 - - min: 6789 - max: 6790 # <- support old default port - # Ceph messenger protocol v2 - - min: 3300 - max: 3300 - # Ceph RADOS ports for OSDs, MDSes - - min: 6800 - max: 7300 - # # Ceph dashboard port HTTP (not recommended) - # - min: 7000 - # max: 7000 - # Ceph dashboard port HTTPS - - min: 8443 - max: 8443 - # Ceph mgr Prometheus Metrics - - min: 9283 - max: 9283 -# OLM: END CLUSTER POD SECURITY POLICY -# OLM: BEGIN POD SECURITY POLICY BINDINGS ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: "psp:rook" -rules: - - apiGroups: - - policy - resources: - - podsecuritypolicies - resourceNames: - - 00-rook-privileged - verbs: - - use ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-ceph-system-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: "psp:rook" -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-default-psp - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: - - kind: ServiceAccount - name: default - namespace: rook-ceph # namespace:cluster ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-osd-psp - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-mgr-psp - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster ---- -apiVersion: 
rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-cmd-reporter-psp - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: - - kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: rook-ceph # namespace:cluster -# OLM: END CLUSTER POD SECURITY POLICY BINDINGS -# OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-cephfs-plugin-sa - namespace: rook-ceph # namespace:operator ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph # namespace:operator -# OLM: END CSI CEPHFS SERVICE ACCOUNT -# OLM: BEGIN CSI CEPHFS ROLE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-external-provisioner-cfg - namespace: rook-ceph # namespace:operator -rules: - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "create", "delete"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -# OLM: END CSI CEPHFS ROLE -# OLM: BEGIN CSI CEPHFS ROLEBINDING ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-provisioner-role-cfg - namespace: rook-ceph # namespace:operator -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: Role - name: cephfs-external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io -# OLM: END CSI CEPHFS ROLEBINDING -# OLM: BEGIN CSI CEPHFS CLUSTER ROLE ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-nodeplugin -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "update"] - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-external-provisioner-runner -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents/status"] - verbs: ["update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", 
"list", "watch", "delete", "get", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots/status"] - verbs: ["update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments/status"] - verbs: ["patch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] -# OLM: END CSI CEPHFS CLUSTER ROLE -# OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-cephfs-plugin-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: "psp:rook" -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-plugin-sa - namespace: rook-ceph # namespace:operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-cephfs-provisioner-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: "psp:rook" -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph # namespace:operator ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-nodeplugin -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-plugin-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: ClusterRole - name: cephfs-csi-nodeplugin - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-provisioner-role -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: ClusterRole - name: cephfs-external-provisioner-runner - apiGroup: rbac.authorization.k8s.io -# OLM: END CSI CEPHFS CLUSTER ROLEBINDING -# OLM: BEGIN CSI RBD SERVICE ACCOUNT ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-rbd-plugin-sa - namespace: rook-ceph # namespace:operator ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph # namespace:operator -# OLM: END CSI RBD SERVICE ACCOUNT -# OLM: BEGIN CSI RBD ROLE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-external-provisioner-cfg - namespace: rook-ceph # namespace:operator -rules: - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch", "create", "delete", "update"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -# OLM: END CSI RBD ROLE -# OLM: BEGIN CSI RBD ROLEBINDING ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-provisioner-role-cfg - namespace: rook-ceph # namespace:operator -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: Role - name: rbd-external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io -# OLM: END CSI RBD ROLEBINDING -# OLM: BEGIN CSI RBD CLUSTER ROLE ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-nodeplugin -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - 
apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "update"] - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["serviceaccounts"] - verbs: ["get"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-external-provisioner-runner -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments/status"] - verbs: ["patch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents/status"] - verbs: ["update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", "list", "watch", "delete", "get", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get"] - - apiGroups: ["replication.storage.openshift.io"] - resources: ["volumereplications", "volumereplicationclasses"] - verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] - - apiGroups: ["replication.storage.openshift.io"] - resources: ["volumereplications/finalizers"] - verbs: ["update"] - - apiGroups: ["replication.storage.openshift.io"] - resources: ["volumereplications/status"] - verbs: ["get", "patch", "update"] - - apiGroups: ["replication.storage.openshift.io"] - resources: ["volumereplicationclasses/status"] - verbs: ["get"] - - apiGroups: [""] - resources: ["serviceaccounts"] - verbs: ["get"] -# OLM: END CSI RBD CLUSTER ROLE -# OLM: BEGIN CSI RBD CLUSTER ROLEBINDING ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-rbd-plugin-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: "psp:rook" -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-plugin-sa - namespace: rook-ceph # namespace:operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-rbd-provisioner-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 
"psp:rook" -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph # namespace:operator ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-nodeplugin -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-plugin-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: ClusterRole - name: rbd-csi-nodeplugin - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-provisioner-role -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: ClusterRole - name: rbd-external-provisioner-runner - apiGroup: rbac.authorization.k8s.io -# OLM: END CSI RBD CLUSTER ROLEBINDING ---- -# Aspects of ceph osd purge job that require access to the operator/cluster namespace -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd - namespace: rook-ceph # namespace:operator -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get"] - - apiGroups: ["apps"] - resources: ["deployments"] - verbs: ["get", "delete"] - - apiGroups: ["batch"] - resources: ["jobs"] - verbs: ["get", "list", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["delete"] ---- -# Allow the osd purge job to run in this namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd - namespace: rook-ceph # namespace:operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-purge-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-purge-osd - namespace: rook-ceph # namespace:operator ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-purge-osd - namespace: rook-ceph # namespace:operator diff --git a/cluster/examples/kubernetes/ceph/crds.yaml b/cluster/examples/kubernetes/ceph/crds.yaml deleted file mode 100644 index d653fdfdb..000000000 --- a/cluster/examples/kubernetes/ceph/crds.yaml +++ /dev/null @@ -1,9154 +0,0 @@ -############################################################################## -# Create the CRDs that are necessary before creating your Rook cluster. -# These resources *must* be created before the cluster.yaml or their variants. -############################################################################## ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephblockpools.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephBlockPool - listKind: CephBlockPoolList - plural: cephblockpools - singular: cephblockpool - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephBlockPool represents a Ceph Storage Pool - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PoolSpec represents the spec of ceph pool - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - status: - description: CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool - properties: - info: - additionalProperties: - type: string - nullable: true - type: object - mirroringInfo: - description: MirroringInfoSpec is the status of the pool mirroring - properties: - details: - type: string - lastChanged: - type: string - lastChecked: - type: string - mode: - description: Mode is the mirroring mode - type: string - peers: - description: Peers are the list of peer sites connected to that cluster - items: - description: PeersSpec contains peer details - properties: - client_name: - description: ClientName is the CephX user used to connect to the peer - type: string - direction: - description: Direction is the peer mirroring direction - type: string - mirror_uuid: - description: MirrorUUID is the mirror UUID - 
type: string - site_name: - description: SiteName is the current site name - type: string - uuid: - description: UUID is the peer UUID - type: string - type: object - type: array - site_name: - description: SiteName is the current site name - type: string - type: object - mirroringStatus: - description: MirroringStatusSpec is the status of the pool mirroring - properties: - details: - description: Details contains potential status errors - type: string - lastChanged: - description: LastChanged is the last time time the status last changed - type: string - lastChecked: - description: LastChecked is the last time time the status was checked - type: string - summary: - description: Summary is the mirroring status summary - properties: - daemon_health: - description: DaemonHealth is the health of the mirroring daemon - type: string - health: - description: Health is the mirroring health - type: string - image_health: - description: ImageHealth is the health of the mirrored image - type: string - states: - description: States is the various state for all mirrored images - nullable: true - properties: - error: - description: Error is when the mirroring state is errored - type: integer - replaying: - description: Replaying is when the replay of the mirroring journal is on-going - type: integer - starting_replay: - description: StartingReplay is when the replay of the mirroring journal starts - type: integer - stopped: - description: Stopped is when the mirroring state is stopped - type: integer - stopping_replay: - description: StopReplaying is when the replay of the mirroring journal stops - type: integer - syncing: - description: Syncing is when the image is syncing - type: integer - unknown: - description: Unknown is when the mirroring state is unknown - type: integer - type: object - type: object - type: object - phase: - description: ConditionType represent a resource's status - type: string - snapshotScheduleStatus: - description: SnapshotScheduleStatusSpec is the status of the snapshot schedule - properties: - details: - description: Details contains potential status errors - type: string - lastChanged: - description: LastChanged is the last time time the status last changed - type: string - lastChecked: - description: LastChecked is the last time time the status was checked - type: string - snapshotSchedules: - description: SnapshotSchedules is the list of snapshots scheduled - items: - description: SnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool - properties: - image: - description: Image is the mirrored image - type: string - items: - description: Items is the list schedules times for a given snapshot - items: - description: SnapshotSchedule is a schedule - properties: - interval: - description: Interval is the interval in which snapshots will be taken - type: string - start_time: - description: StartTime is the snapshot starting time - type: string - type: object - type: array - namespace: - description: Namespace is the RADOS namespace the image is part of - type: string - pool: - description: Pool is the pool name - type: string - type: object - nullable: true - type: array - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - 
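As a usage sketch against the CephBlockPool schema above, a pool exercising a few of these fields could look like the following; the pool name, replica size, and schedule values are illustrative only.

apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: rook-ceph # namespace:cluster
spec:
  failureDomain: host
  replicated:
    size: 3
    requireSafeReplicaSize: true
  compressionMode: none
  mirroring:
    enabled: true
    mode: image
    snapshotSchedules:
      - interval: 24h
        startTime: "14:00:00-05:00"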
controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephclients.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephClient - listKind: CephClientList - plural: cephclients - singular: cephclient - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephClient represents a Ceph Client - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec represents the specification of a Ceph Client - properties: - caps: - additionalProperties: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - name: - type: string - required: - - caps - type: object - status: - description: Status represents the status of a Ceph Client - properties: - info: - additionalProperties: - type: string - nullable: true - type: object - phase: - description: ConditionType represent a resource's status - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephclusters.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephCluster - listKind: CephClusterList - plural: cephclusters - singular: cephcluster - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Directory used on the K8s nodes - jsonPath: .spec.dataDirHostPath - name: DataDirHostPath - type: string - - description: Number of MONs - jsonPath: .spec.mon.count - name: MonCount - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Phase - jsonPath: .status.phase - name: Phase - type: string - - description: Message - jsonPath: .status.message - name: Message - type: string - - description: Ceph Health - jsonPath: .status.ceph.health - name: Health - type: string - - jsonPath: .spec.external.enable - name: External - type: boolean - name: v1 - schema: - openAPIV3Schema: - description: CephCluster is a Ceph storage cluster - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec represents the specification of Ceph Cluster - properties: - annotations: - additionalProperties: - additionalProperties: - type: string - description: Annotations are annotations - type: object - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - cephVersion: - description: The version information that instructs Rook to orchestrate a particular version of Ceph. - nullable: true - properties: - allowUnsupported: - description: Whether to allow unsupported versions (do not set to true in production) - type: boolean - image: - description: Image is the container image used to launch the ceph daemons, such as quay.io/ceph/ceph: The full list of images can be found at https://quay.io/repository/ceph/ceph?tab=tags - type: string - type: object - cleanupPolicy: - description: Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster deletion is not imminent. - nullable: true - properties: - allowUninstallWithVolumes: - description: AllowUninstallWithVolumes defines whether we can proceed with the uninstall if they are RBD images still present - type: boolean - confirmation: - description: Confirmation represents the cleanup confirmation - nullable: true - pattern: ^$|^yes-really-destroy-data$ - type: string - sanitizeDisks: - description: SanitizeDisks represents way we sanitize disks - nullable: true - properties: - dataSource: - description: DataSource is the data source to use to sanitize the disk with - enum: - - zero - - random - type: string - iteration: - description: Iteration is the number of pass to apply the sanitizing - format: int32 - type: integer - method: - description: Method is the method we use to sanitize disks - enum: - - complete - - quick - type: string - type: object - type: object - continueUpgradeAfterChecksEvenIfNotHealthy: - description: ContinueUpgradeAfterChecksEvenIfNotHealthy defines if an upgrade should continue even if PGs are not clean - type: boolean - crashCollector: - description: A spec for the crash controller - nullable: true - properties: - daysToRetain: - description: DaysToRetain represents the number of days to retain crash until they get pruned - type: integer - disable: - description: Disable determines whether we should enable the crash collector - type: boolean - type: object - dashboard: - description: Dashboard settings - nullable: true - properties: - enabled: - description: Enabled determines whether to enable the dashboard - type: boolean - port: - description: Port is the dashboard webserver port - maximum: 65535 - minimum: 0 - type: integer - ssl: - description: SSL determines whether SSL should be used - type: boolean - urlPrefix: - description: URLPrefix is a prefix for all URLs to use the dashboard with a reverse proxy - type: string - type: object - dataDirHostPath: - description: The path on the host where config and data can be persisted - pattern: ^/(\S+) - type: string - disruptionManagement: - description: A spec for configuring disruption management. 
- nullable: true - properties: - machineDisruptionBudgetNamespace: - description: Namespace to look for MDBs by the machineDisruptionBudgetController - type: string - manageMachineDisruptionBudgets: - description: This enables management of machinedisruptionbudgets - type: boolean - managePodBudgets: - description: This enables management of poddisruptionbudgets - type: boolean - osdMaintenanceTimeout: - description: OSDMaintenanceTimeout sets how many additional minutes the DOWN/OUT interval is for drained failure domains it only works if managePodBudgets is true. the default is 30 minutes - format: int64 - type: integer - pgHealthCheckTimeout: - description: PGHealthCheckTimeout is the time (in minutes) that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. Rook will continue with the next drain if the timeout exceeds. It only works if managePodBudgets is true. No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. - format: int64 - type: integer - type: object - external: - description: Whether the Ceph Cluster is running external to this Kubernetes cluster mon, mgr, osd, mds, and discover daemons will not be created for external clusters. - nullable: true - properties: - enable: - description: Enable determines whether external mode is enabled or not - type: boolean - type: object - x-kubernetes-preserve-unknown-fields: true - healthCheck: - description: Internal daemon healthchecks and liveness probe - nullable: true - properties: - daemonHealth: - description: DaemonHealth is the health check for a given daemon - nullable: true - properties: - mon: - description: Monitor represents the health check settings for the Ceph monitor - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - osd: - description: ObjectStorageDaemon represents the health check settings for the Ceph OSDs - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - status: - description: Status represents the health check settings for the Ceph health - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - livenessProbe: - additionalProperties: - description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon - properties: - disabled: - description: Disabled determines whether probe is disable or not - type: boolean - probe: - description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. - properties: - exec: - description: One and only one of the following should be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. 
- format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - type: object - description: LivenessProbe allows to change the livenessprobe configuration for a given daemon - type: object - type: object - labels: - additionalProperties: - additionalProperties: - type: string - description: Labels are label for a given daemons - type: object - description: The labels-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - logCollector: - description: Logging represents loggings settings - nullable: true - properties: - enabled: - description: Enabled represents whether the log collector is enabled - type: boolean - periodicity: - description: Periodicity is the periodicity of the log rotation - type: string - type: object - mgr: - description: A spec for mgr related options - nullable: true - properties: - allowMultiplePerNode: - description: AllowMultiplePerNode allows to run multiple managers on the same node (not recommended) - type: boolean - count: - description: Count is the number of manager to run - maximum: 2 - minimum: 0 - type: integer - modules: - description: Modules is the list of ceph manager modules to enable/disable - items: - description: Module represents mgr modules that the user wants to enable or disable - properties: - enabled: - description: Enabled determines whether a module should be enabled or not - type: boolean - name: - description: Name is the name of the ceph manager module - type: string - type: object - nullable: true - type: array - type: object - mon: - description: A spec for mon related options - nullable: true - properties: - allowMultiplePerNode: - description: AllowMultiplePerNode determines if we can run multiple monitors on the same node (not recommended) - type: boolean - count: - description: Count is the number of Ceph monitors - minimum: 0 - type: integer - stretchCluster: - description: StretchCluster is the stretch cluster specification - properties: - failureDomainLabel: - description: 'FailureDomainLabel the failure domain name (e,g: zone)' - type: string - subFailureDomain: - description: SubFailureDomain is the failure domain within a zone - type: string - zones: - description: Zones is the list of zones - items: - description: StretchClusterZoneSpec represents the specification of a stretched zone in a Ceph Cluster - properties: - arbiter: - description: Arbiter determines if the zone contains the arbiter - type: boolean - name: - description: Name is the name of the zone - type: string - volumeClaimTemplate: - description: VolumeClaimTemplate is the PVC template - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. 
- type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - nullable: true - type: array - type: object - volumeClaimTemplate: - description: VolumeClaimTemplate is the PVC definition - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. - type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - monitoring: - description: Prometheus based Monitoring settings - nullable: true - properties: - enabled: - description: Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus types must exist or the creation will fail. - type: boolean - externalMgrEndpoints: - description: ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint - items: - description: EndpointAddress is a tuple that describes single IP address. - properties: - hostname: - description: The Hostname of this endpoint - type: string - ip: - description: 'The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. TODO: This should allow hostname or IP, See #4447.' - type: string - nodeName: - description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.' - type: string - targetRef: - description: Reference to object providing the endpoint. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). 
This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - type: object - required: - - ip - type: object - nullable: true - type: array - externalMgrPrometheusPort: - description: ExternalMgrPrometheusPort Prometheus exporter port - maximum: 65535 - minimum: 0 - type: integer - rulesNamespace: - description: RulesNamespace is the namespace where the prometheus rules and alerts should be created. If empty, the same namespace as the cluster will be used. - type: string - type: object - network: - description: Network related configuration - nullable: true - properties: - dualStack: - description: DualStack determines whether Ceph daemons should listen on both IPv4 and IPv6 - type: boolean - hostNetwork: - description: HostNetwork to enable host network - type: boolean - ipFamily: - default: IPv4 - description: IPFamily is the single stack IPv6 or IPv4 protocol - enum: - - IPv4 - - IPv6 - nullable: true - type: string - provider: - description: Provider is what provides network connectivity to the cluster e.g. "host" or "multus" - nullable: true - type: string - selectors: - additionalProperties: - type: string - description: Selectors string values describe what networks will be used to connect the cluster. Meanwhile the keys describe each network respective responsibilities or any metadata storage provider decide. - nullable: true - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - placement: - additionalProperties: - description: Placement is the placement for an object - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
- properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
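The tolerations schema described above is the standard Kubernetes toleration list; a minimal sketch with illustrative taint keys and values (none of them are defined by this CRD):

  tolerations:
    - key: storage-node                    # illustrative taint key
      operator: Exists                     # Exists matches any value
      effect: NoSchedule
    - key: node.example.com/maintenance    # illustrative taint key
      operator: Equal
      value: "true"
      effect: NoExecute
      tolerationSeconds: 300               # tolerate the NoExecute taint for 300s before eviction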
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - description: The placement-related configuration to pass to kubernetes (affinity, node selector, tolerations). - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - priorityClassNames: - additionalProperties: - type: string - description: PriorityClassNames sets priority classes on components - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - removeOSDsIfOutAndSafeToRemove: - description: Remove the OSD that is out and safe to remove only if this option is true - type: boolean - resources: - additionalProperties: - description: ResourceRequirements describes the compute resource requirements. 
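For the topologySpreadConstraints block above, maxSkew, topologyKey and whenUnsatisfiable are the required fields; a minimal sketch with illustrative values:

  topologySpreadConstraints:
    - maxSkew: 1                               # default value; 0 is not allowed
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule         # or ScheduleAnyway
      labelSelector:
        matchLabels:
          app: example-app                     # illustrative selector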
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - description: Resources set resource requests and limits - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - security: - description: Security represents security settings - nullable: true - properties: - kms: - description: KeyManagementService is the main Key Management option - nullable: true - properties: - connectionDetails: - additionalProperties: - type: string - description: ConnectionDetails contains the KMS connection details (address, port etc) - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - tokenSecretName: - description: TokenSecretName is the kubernetes secret containing the KMS token - type: string - type: object - type: object - skipUpgradeChecks: - description: SkipUpgradeChecks defines if an upgrade should be forced even if one of the check fails - type: boolean - storage: - description: A spec for available storage in the cluster and how it should be used - nullable: true - properties: - config: - additionalProperties: - type: string - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - deviceFilter: - description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster - type: string - devicePathFilter: - description: A regular expression to allow more fine-grained selection of devices with path names - type: string - devices: - description: List of devices to use as storage devices - items: - description: Device represents a disk to use in the cluster - properties: - config: - additionalProperties: - type: string - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - fullpath: - type: string - name: - type: string - type: object - nullable: true - type: array - x-kubernetes-preserve-unknown-fields: true - nodes: - items: - description: Node is a storage nodes - properties: - config: - additionalProperties: - type: string - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - deviceFilter: - description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster - type: string - devicePathFilter: - description: A regular expression to allow more fine-grained selection of devices with path names - type: string - devices: - description: List of devices to use as storage devices - items: - description: Device represents a disk to use in the cluster - properties: - config: - additionalProperties: - type: string - nullable: true - type: object - 
x-kubernetes-preserve-unknown-fields: true - fullpath: - type: string - name: - type: string - type: object - nullable: true - type: array - x-kubernetes-preserve-unknown-fields: true - name: - type: string - resources: - description: ResourceRequirements describes the compute resource requirements. - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - useAllDevices: - description: Whether to consume all the storage devices found on a machine - type: boolean - volumeClaimTemplates: - description: PersistentVolumeClaims to use as storage - items: - description: PersistentVolumeClaim is a user's request for and claim to a persistent volume - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. - type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. 
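The volumeClaimTemplates entries described above follow the ordinary PersistentVolumeClaim shape; a minimal sketch in which the claim name, size and StorageClass are illustrative assumptions:

  volumeClaimTemplates:
    - metadata:
        name: data                       # illustrative claim name
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi                # illustrative size
        storageClassName: standard       # illustrative StorageClass
        volumeMode: Block                # Filesystem is implied when omitted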
- type: string - type: object - type: object - type: array - type: object - nullable: true - type: array - onlyApplyOSDPlacement: - type: boolean - storageClassDeviceSets: - items: - description: StorageClassDeviceSet is a storage class device set - properties: - config: - additionalProperties: - type: string - description: Provider-specific device configuration - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - count: - description: Count is the number of devices in this set - minimum: 1 - type: integer - encrypted: - description: Whether to encrypt the deviceSet - type: boolean - name: - description: Name is a unique identifier for the set - type: string - placement: - description: Placement is the placement for an object - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
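A minimal sketch of the requiredDuringSchedulingIgnoredDuringExecution node affinity described above; nodeSelectorTerms are ORed while the matchExpressions inside a term are ANDed, and the label key and values here are illustrative:

  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: topology.kubernetes.io/zone   # illustrative node label
              operator: In
              values:
                - zone-a
                - zone-b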
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
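The requiredDuringSchedulingIgnoredDuringExecution anti-affinity entries above differ from the preferred form only in that they are plain pod affinity terms without a weight; a minimal sketch with an illustrative selector:

  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            app: example-app                   # illustrative selector
        topologyKey: kubernetes.io/hostname    # required field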
- type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. 
- type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - portable: - description: Portable represents OSD portability across the hosts - type: boolean - preparePlacement: - description: Placement is the placement for an object - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. 
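Pulling together the storageClassDeviceSet fields enumerated above (name, count, encrypted, portable, placement, preparePlacement), a minimal sketch; the set name and count are illustrative, and the placement and preparePlacement values would take the same affinity shapes sketched earlier:

  storageClassDeviceSets:
    - name: set1            # illustrative set name
      count: 3              # minimum allowed is 1
      encrypted: false
      portable: true
      placement: {}         # same placement shape as sketched above
      preparePlacement: {}  # same placement shape as sketched above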
- items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. 
- type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
- type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. 
- type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - resources: - description: ResourceRequirements describes the compute resource requirements. - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - schedulerName: - description: Scheduler name for OSD pod placement - type: string - tuneDeviceClass: - description: TuneSlowDeviceClass Tune the OSD when running on a slow Device Class - type: boolean - tuneFastDeviceClass: - description: TuneFastDeviceClass Tune the OSD when running on a fast Device Class - type: boolean - volumeClaimTemplates: - description: VolumeClaimTemplates is a list of PVC templates for the underlying storage devices - items: - description: PersistentVolumeClaim is a user's request for and claim to a persistent volume - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. 
- type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - type: array - required: - - count - - name - - volumeClaimTemplates - type: object - nullable: true - type: array - useAllDevices: - description: Whether to consume all the storage devices found on a machine - type: boolean - useAllNodes: - type: boolean - volumeClaimTemplates: - description: PersistentVolumeClaims to use as storage - items: - description: PersistentVolumeClaim is a user's request for and claim to a persistent volume - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. - type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - type: array - type: object - waitTimeoutForHealthyOSDInMinutes: - description: WaitTimeoutForHealthyOSDInMinutes defines the time the operator would wait before an OSD can be stopped for upgrade or restart. If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. The default wait timeout is 10 minutes. 
- format: int64 - type: integer - type: object - status: - description: ClusterStatus represents the status of a Ceph cluster - nullable: true - properties: - ceph: - description: CephStatus is the details health of a Ceph Cluster - properties: - capacity: - description: Capacity is the capacity information of a Ceph Cluster - properties: - bytesAvailable: - format: int64 - type: integer - bytesTotal: - format: int64 - type: integer - bytesUsed: - format: int64 - type: integer - lastUpdated: - type: string - type: object - details: - additionalProperties: - description: CephHealthMessage represents the health message of a Ceph Cluster - properties: - message: - type: string - severity: - type: string - required: - - message - - severity - type: object - type: object - health: - type: string - lastChanged: - type: string - lastChecked: - type: string - previousHealth: - type: string - versions: - description: CephDaemonsVersions show the current ceph version for different ceph daemons - properties: - cephfs-mirror: - additionalProperties: - type: integer - description: CephFSMirror shows CephFSMirror Ceph version - type: object - mds: - additionalProperties: - type: integer - description: Mds shows Mds Ceph version - type: object - mgr: - additionalProperties: - type: integer - description: Mgr shows Mgr Ceph version - type: object - mon: - additionalProperties: - type: integer - description: Mon shows Mon Ceph version - type: object - osd: - additionalProperties: - type: integer - description: Osd shows Osd Ceph version - type: object - overall: - additionalProperties: - type: integer - description: Overall shows overall Ceph version - type: object - rbd-mirror: - additionalProperties: - type: integer - description: RbdMirror shows RbdMirror Ceph version - type: object - rgw: - additionalProperties: - type: integer - description: Rgw shows Rgw Ceph version - type: object - type: object - type: object - conditions: - items: - description: Condition represents a status condition on any Rook-Ceph Custom Resource. 
- properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - description: ConditionReason is a reason for a condition - type: string - status: - type: string - type: - description: ConditionType represent a resource's status - type: string - type: object - type: array - message: - type: string - phase: - description: ConditionType represent a resource's status - type: string - state: - description: ClusterState represents the state of a Ceph Cluster - type: string - storage: - description: CephStorage represents flavors of Ceph Cluster Storage - properties: - deviceClasses: - items: - description: DeviceClasses represents device classes of a Ceph Cluster - properties: - name: - type: string - type: object - type: array - type: object - version: - description: ClusterVersion represents the version of a Ceph Cluster - properties: - image: - type: string - version: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephfilesystemmirrors.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystemMirror - listKind: CephFilesystemMirrorList - plural: cephfilesystemmirrors - singular: cephfilesystemmirror - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephFilesystemMirror is the Ceph Filesystem Mirror object definition - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: FilesystemMirroringSpec is the filesystem mirroring specification - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. - nullable: true - type: object - placement: - description: The affinity to place the rgw pods (default is to place on any available node) - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
- properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
- type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' 
- type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - priorityClassName: - description: PriorityClassName sets priority class on the cephfs-mirror pods - type: string - resources: - description: The resource requirements for the cephfs-mirror pods - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephfilesystems.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystem - listKind: CephFilesystemList - plural: cephfilesystems - singular: cephfilesystem - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Number of desired active MDS daemons - jsonPath: .spec.metadataServer.activeCount - name: ActiveMDS - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .status.phase - name: Phase - type: string - name: v1 - schema: - openAPIV3Schema: - description: CephFilesystem represents a Ceph Filesystem - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: FilesystemSpec represents the spec of a file system - properties: - dataPools: - description: The data pool settings - items: - description: PoolSpec represents the spec of ceph pool - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - nullable: true - type: array - metadataPool: - description: The metadata pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The 
algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. - type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms 
of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - metadataServer: - description: The mds pod info - properties: - activeCount: - description: The number of metadata servers that are active. The remaining servers in the cluster will be in standby mode. - format: int32 - maximum: 10 - minimum: 1 - type: integer - activeStandby: - description: Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. If false, standbys will still be available, but will not have a warm metadata cache. - type: boolean - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - placement: - description: The affinity to place the mds pods (default is to place on all available node) with a daemonset - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
- properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
- type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. 
Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' 
- format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - priorityClassName: - description: PriorityClassName sets priority classes on components - type: string - resources: - description: The resource requirements for the rgw pods - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - activeCount - type: object - mirroring: - description: The mirroring settings - nullable: true - properties: - enabled: - description: Enabled whether this filesystem is mirrored or not - type: boolean - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotRetention: - description: Retention is the retention policy for a snapshot schedule One path has exactly one retention policy. 
A policy can however contain multiple count-time period pairs in order to specify complex retention policies - items: - description: SnapshotScheduleRetentionSpec is a retention policy - properties: - duration: - description: Duration represents the retention duration for a snapshot - type: string - path: - description: Path is the path to snapshot - type: string - type: object - type: array - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored filesystems - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. - type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - preserveFilesystemOnDelete: - description: Preserve the fs in the cluster on CephFilesystem CR deletion. Setting this to true automatically implies PreservePoolsOnDelete is true. - type: boolean - preservePoolsOnDelete: - description: Preserve pools on filesystem deletion - type: boolean - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - dataPools - - metadataPool - - metadataServer - type: object - status: - description: CephFilesystemStatus represents the status of a Ceph Filesystem - properties: - info: - additionalProperties: - type: string - description: Use only info and put mirroringStatus in it? 
- nullable: true - type: object - mirroringStatus: - description: MirroringStatus is the filesystem mirroring status - properties: - daemonsStatus: - description: PoolMirroringStatus is the mirroring status of a filesystem - items: - description: FilesystemMirrorInfoSpec is the filesystem mirror status of a given filesystem - properties: - daemon_id: - description: DaemonID is the cephfs-mirror name - type: integer - filesystems: - description: Filesystems is the list of filesystems managed by a given cephfs-mirror daemon - items: - description: FilesystemsSpec is spec for the mirrored filesystem - properties: - directory_count: - description: DirectoryCount is the number of directories in the filesystem - type: integer - filesystem_id: - description: FilesystemID is the filesystem identifier - type: integer - name: - description: Name is name of the filesystem - type: string - peers: - description: Peers represents the mirroring peers - items: - description: FilesystemMirrorInfoPeerSpec is the specification of a filesystem peer mirror - properties: - remote: - description: Remote are the remote cluster information - properties: - client_name: - description: ClientName is cephx name - type: string - cluster_name: - description: ClusterName is the name of the cluster - type: string - fs_name: - description: FsName is the filesystem name - type: string - type: object - stats: - description: Stats are the stat a peer mirror - properties: - failure_count: - description: FailureCount is the number of mirroring failure - type: integer - recovery_count: - description: RecoveryCount is the number of recovery attempted after failures - type: integer - type: object - uuid: - description: UUID is the peer unique identifier - type: string - type: object - type: array - type: object - type: array - type: object - nullable: true - type: array - details: - description: Details contains potential status errors - type: string - lastChanged: - description: LastChanged is the last time time the status last changed - type: string - lastChecked: - description: LastChecked is the last time time the status was checked - type: string - type: object - phase: - description: ConditionType represent a resource's status - type: string - snapshotScheduleStatus: - description: FilesystemSnapshotScheduleStatusSpec is the status of the snapshot schedule - properties: - details: - description: Details contains potential status errors - type: string - lastChanged: - description: LastChanged is the last time time the status last changed - type: string - lastChecked: - description: LastChecked is the last time time the status was checked - type: string - snapshotSchedules: - description: SnapshotSchedules is the list of snapshots scheduled - items: - description: FilesystemSnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool - properties: - fs: - description: Fs is the name of the Ceph Filesystem - type: string - path: - description: Path is the path on the filesystem - type: string - rel_path: - type: string - retention: - description: FilesystemSnapshotScheduleStatusRetention is the retention specification for a filesystem snapshot schedule - properties: - active: - description: Active is whether the scheduled is active or not - type: boolean - created: - description: Created is when the snapshot schedule was created - type: string - created_count: - description: CreatedCount is total amount of snapshots - type: integer - first: - description: First is when the first snapshot schedule was taken - 
type: string - last: - description: Last is when the last snapshot schedule was taken - type: string - last_pruned: - description: LastPruned is when the last snapshot schedule was pruned - type: string - pruned_count: - description: PrunedCount is total amount of pruned snapshots - type: integer - start: - description: Start is when the snapshot schedule starts - type: string - type: object - schedule: - type: string - subvol: - description: Subvol is the name of the sub volume - type: string - type: object - nullable: true - type: array - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephnfses.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephNFS - listKind: CephNFSList - plural: cephnfses - shortNames: - - nfs - singular: cephnfs - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephNFS represents a Ceph NFS - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: NFSGaneshaSpec represents the spec of an nfs ganesha server - properties: - rados: - description: RADOS is the Ganesha RADOS specification - properties: - namespace: - description: Namespace is the RADOS namespace where NFS client recovery data is stored. - type: string - pool: - description: Pool is the RADOS pool where NFS client recovery data is stored. - type: string - required: - - namespace - - pool - type: object - server: - description: Server is the Ganesha Server specification - properties: - active: - description: The number of active Ganesha servers - type: integer - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. 
- nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - logLevel: - description: LogLevel set logging level - type: string - placement: - description: The affinity to place the ganesha pods - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
- format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
- items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - priorityClassName: - description: PriorityClassName sets the priority class on the pods - type: string - resources: - description: Resources set resource requests and limits - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - active - type: object - required: - - rados - - server - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephobjectrealms.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectRealm - listKind: CephObjectRealmList - plural: cephobjectrealms - singular: cephobjectrealm - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectRealm represents a Ceph Object Store Gateway Realm - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectRealmSpec represent the spec of an ObjectRealm - nullable: true - properties: - pull: - description: PullSpec represents the pulling specification of a Ceph Object Storage Gateway Realm - properties: - endpoint: - type: string - required: - - endpoint - type: object - required: - - pull - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephobjectstores.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStore - listKind: CephObjectStoreList - plural: cephobjectstores - singular: cephobjectstore - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectStore represents a Ceph Object Store Gateway - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectStoreSpec represent the spec of a pool - properties: - dataPool: - description: The data pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
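# Reviewer note (not part of the diff): for orientation while reading the schema
# being removed above, a dataPool block using the erasureCoded settings already
# enumerated would look roughly like this; the chunk counts are illustrative
# assumptions, not values taken from this change.
dataPool:
  failureDomain: host
  erasureCoded:
    dataChunks: 2        # required, 0-9 per the schema above
    codingChunks: 1      # required, 0-9 per the schema above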
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - gateway: - description: The rgw pod info - nullable: true - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - caBundleRef: - description: The name of the secret that stores custom ca-bundle with root and intermediate certificates. - nullable: true - type: string - externalRgwEndpoints: - description: ExternalRgwEndpoints points to external rgw endpoint(s) - items: - description: EndpointAddress is a tuple that describes single IP address. - properties: - hostname: - description: The Hostname of this endpoint - type: string - ip: - description: 'The IP of this endpoint. 
May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. TODO: This should allow hostname or IP, See #4447.' - type: string - nodeName: - description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.' - type: string - targetRef: - description: Reference to object providing the endpoint. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - type: object - required: - - ip - type: object - nullable: true - type: array - instances: - description: The number of pods in the rgw replicaset. - format: int32 - nullable: true - type: integer - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - placement: - description: The affinity to place the rgw pods (default is to place on any available node) - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. 
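# Reviewer note (not part of the diff): a gateway block matching the fields
# enumerated above (instances, labels, externalRgwEndpoints) would look roughly
# like this; the IP address and label value are illustrative assumptions.
gateway:
  instances: 2
  labels:
    team: storage                  # hypothetical label
  externalRgwEndpoints:
    - ip: 192.168.39.182           # `ip` is the only required field per item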
- items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. 
- type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
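# Reviewer note (not part of the diff): a placement block combining the
# podAntiAffinity and tolerations fields enumerated above might look like this;
# the taint key and label value are illustrative assumptions.
placement:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname   # required field
        labelSelector:
          matchLabels:
            app: rook-ceph-rgw
  tolerations:
    - key: storage-node                       # hypothetical taint key
      operator: Exists
      effect: NoSchedule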
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - port: - description: The port the rgw service will be listening on (http) - format: int32 - type: integer - priorityClassName: - description: PriorityClassName sets priority classes on the rgw pods - type: string - resources: - description: The resource requirements for the rgw pods - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - securePort: - description: The port the rgw service will be listening on (https) - format: int32 - maximum: 65535 - minimum: 0 - nullable: true - type: integer - service: - description: The configuration related to add/set on each rgw service. - nullable: true - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each rgw service. nullable optional - type: object - type: object - sslCertificateRef: - description: The name of the secret that stores the ssl certificate for secure rgw connections - nullable: true - type: string - type: object - healthCheck: - description: The rgw Bucket healthchecks and liveness probe - nullable: true - properties: - bucket: - description: HealthCheckSpec represents the health check of an object store bucket - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - livenessProbe: - description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon - properties: - disabled: - description: Disabled determines whether probe is disable or not - type: boolean - probe: - description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. - properties: - exec: - description: One and only one of the following should be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP allows repeated headers. 
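# Reviewer note (not part of the diff): the resource requests/limits, securePort
# and sslCertificateRef fields enumerated above would be expressed like this;
# the quantities and the secret name are illustrative assumptions.
gateway:
  securePort: 443
  sslCertificateRef: my-rgw-tls-cert   # hypothetical Kubernetes secret name
  resources:
    requests:
      cpu: 500m
      memory: 1Gi
    limits:
      cpu: "2"
      memory: 2Gi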
- items: - description: HTTPHeader describes a custom header to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - type: object - type: object - metadataPool: - description: The metadata pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - preservePoolsOnDelete: - description: Preserve pools on object store deletion - type: boolean - security: - description: Security represents security settings - nullable: true - properties: - kms: - description: KeyManagementService is the main Key Management option - nullable: true - properties: - connectionDetails: - additionalProperties: - type: string - description: ConnectionDetails contains the KMS connection details (address, port etc) - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - tokenSecretName: - description: TokenSecretName is the kubernetes secret containing the KMS token - type: string - type: object - type: object - zone: - description: The multisite info - nullable: true - properties: - name: - description: RGW Zone the Object Store is in - type: string - required: - - name - type: 
object - type: object - status: - description: ObjectStoreStatus represents the status of a Ceph Object Store resource - properties: - bucketStatus: - description: BucketStatus represents the status of a bucket - properties: - details: - type: string - health: - description: ConditionType represent a resource's status - type: string - lastChanged: - type: string - lastChecked: - type: string - type: object - conditions: - items: - description: Condition represents a status condition on any Rook-Ceph Custom Resource. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - description: ConditionReason is a reason for a condition - type: string - status: - type: string - type: - description: ConditionType represent a resource's status - type: string - type: object - type: array - info: - additionalProperties: - type: string - nullable: true - type: object - message: - type: string - phase: - description: ConditionType represent a resource's status - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephobjectstoreusers.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStoreUser - listKind: CephObjectStoreUserList - plural: cephobjectstoreusers - shortNames: - - rcou - - objectuser - singular: cephobjectstoreuser - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectStoreUser represents a Ceph Object Store Gateway User - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectStoreUserSpec represent the spec of an Objectstoreuser - properties: - displayName: - description: The display name for the ceph users - type: string - store: - description: The store the user will be created in - type: string - type: object - status: - description: ObjectStoreUserStatus represents the status Ceph Object Store Gateway User - properties: - info: - additionalProperties: - type: string - nullable: true - type: object - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephobjectzonegroups.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectZoneGroup - listKind: CephObjectZoneGroupList - plural: cephobjectzonegroups - singular: cephobjectzonegroup - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectZoneGroup represents a Ceph Object Store Gateway Zone Group - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup - properties: - realm: - description: The display name for the ceph users - type: string - required: - - realm - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephobjectzones.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectZone - listKind: CephObjectZoneList - plural: cephobjectzones - singular: cephobjectzone - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephObjectZone represents a Ceph Object Store Gateway Zone - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ObjectZoneSpec represent the spec of an ObjectZone - properties: - dataPool: - description: The data pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. 
- type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - metadataPool: - description: The metadata pool settings - nullable: true - properties: - compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' - enum: - - none - - passive - - aggressive - - force - - "" - nullable: true - type: string - crushRoot: - description: The root of the crush hierarchy utilized by the pool - nullable: true - type: string - deviceClass: - description: The device class the OSD should set to for use in the pool - nullable: true - type: string - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - type: boolean - erasureCoded: - description: The erasure code settings - properties: - algorithm: - description: The algorithm for erasure coding - 
type: string - codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 - minimum: 0 - type: integer - required: - - codingChunks - - dataChunks - type: object - failureDomain: - description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' - type: string - mirroring: - description: The mirroring settings - properties: - enabled: - description: Enabled whether this pool is mirrored or not - type: boolean - mode: - description: 'Mode is the mirroring mode: either pool or image' - type: string - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - snapshotSchedules: - description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - items: - description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool - properties: - interval: - description: Interval represent the periodicity of the snapshot. - type: string - path: - description: Path is the path to snapshot, only valid for CephFS - type: string - startTime: - description: StartTime indicates when to start the snapshot - type: string - type: object - type: array - type: object - parameters: - additionalProperties: - type: string - description: Parameters is a list of properties to enable on a given pool - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - quotas: - description: The quota settings - nullable: true - properties: - maxBytes: - description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize - format: int64 - type: integer - maxObjects: - description: MaxObjects represents the quota in objects - format: int64 - type: integer - maxSize: - description: MaxSize represents the quota in bytes as a string - pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ - type: string - type: object - replicated: - description: The replication settings - properties: - hybridStorage: - description: HybridStorage represents hybrid storage tier settings - nullable: true - properties: - primaryDeviceClass: - description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD - minLength: 1 - type: string - secondaryDeviceClass: - description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - minLength: 1 - type: string - required: - - primaryDeviceClass - - secondaryDeviceClass - type: object - replicasPerFailureDomain: - description: ReplicasPerFailureDomain the number of replica in the specified failure domain - minimum: 1 - type: integer - requireSafeReplicaSize: - description: RequireSafeReplicaSize if false allows you to set replica 1 - type: boolean - size: - description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - minimum: 0 - type: integer - subFailureDomain: - description: SubFailureDomain the name of the sub-failure domain - type: string - targetSizeRatio: - description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the 
total cluster capacity - type: number - required: - - size - type: object - statusCheck: - description: The mirroring statusCheck - properties: - mirror: - description: HealthCheckSpec represents the health check of an object store bucket - nullable: true - properties: - disabled: - type: boolean - interval: - description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - type: string - timeout: - type: string - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - zoneGroup: - description: The display name for the ceph users - type: string - required: - - dataPool - - metadataPool - - zoneGroup - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: cephrbdmirrors.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephRBDMirror - listKind: CephRBDMirrorList - plural: cephrbdmirrors - singular: cephrbdmirror - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: CephRBDMirror represents a Ceph RBD Mirror - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: RBDMirroringSpec represents the specification of an RBD mirror daemon - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - count: - description: Count represents the number of rbd mirror instance to run - minimum: 1 - type: integer - labels: - additionalProperties: - type: string - description: The labels-related configuration to add/set on each Pod related object. 
- nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - peers: - description: Peers represents the peers spec - nullable: true - properties: - secretNames: - description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - items: - type: string - type: array - type: object - placement: - description: The affinity to place the rgw pods (default is to place on any available node) - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
- items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' - format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - priorityClassName: - description: PriorityClassName sets priority class on the rbd mirror pods - type: string - resources: - description: The resource requirements for the rbd mirror pods - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - count - type: object - status: - description: Status represents the status of an object - properties: - phase: - type: string - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: objectbucketclaims.objectbucket.io -spec: - group: objectbucket.io - names: - kind: ObjectBucketClaim - listKind: ObjectBucketClaimList - plural: objectbucketclaims - singular: objectbucketclaim - shortNames: - - obc - - obcs - scope: Namespaced - versions: - - name: v1alpha1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - storageClassName: - type: string - bucketName: - type: string - generateBucketName: - type: string - additionalConfig: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - objectBucketName: - type: string - status: - type: object - x-kubernetes-preserve-unknown-fields: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: objectbuckets.objectbucket.io -spec: - group: objectbucket.io - names: - kind: ObjectBucket - listKind: ObjectBucketList - plural: objectbuckets - singular: objectbucket - shortNames: - - ob - - obs - scope: Cluster - versions: - - name: v1alpha1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - 
properties: - spec: - type: object - properties: - storageClassName: - type: string - endpoint: - type: object - nullable: true - properties: - bucketHost: - type: string - bucketPort: - type: integer - format: int32 - bucketName: - type: string - region: - type: string - subRegion: - type: string - additionalConfig: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - authentication: - type: object - nullable: true - items: - type: object - x-kubernetes-preserve-unknown-fields: true - additionalState: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - reclaimPolicy: - type: string - claimRef: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - type: object - x-kubernetes-preserve-unknown-fields: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: volumereplicationclasses.replication.storage.openshift.io -spec: - group: replication.storage.openshift.io - names: - kind: VolumeReplicationClass - listKind: VolumeReplicationClassList - plural: volumereplicationclasses - shortNames: - - vrc - singular: volumereplicationclass - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.provisioner - name: provisioner - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: VolumeReplicationClass is the Schema for the volumereplicationclasses API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VolumeReplicationClassSpec specifies parameters that an underlying storage system uses when creating a volume replica. A specific VolumeReplicationClass is used by specifying its name in a VolumeReplication object. 
- properties: - parameters: - additionalProperties: - type: string - description: Parameters is a key-value map with storage provisioner specific configurations for creating volume replicas - type: object - provisioner: - description: Provisioner is the name of storage provisioner - type: string - required: - - provisioner - type: object - status: - description: VolumeReplicationClassStatus defines the observed state of VolumeReplicationClass - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: volumereplications.replication.storage.openshift.io -spec: - group: replication.storage.openshift.io - names: - kind: VolumeReplication - listKind: VolumeReplicationList - plural: volumereplications - shortNames: - - vr - singular: volumereplication - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.volumeReplicationClass - name: volumeReplicationClass - type: string - - jsonPath: .spec.dataSource.name - name: pvcName - type: string - - jsonPath: .spec.replicationState - name: desiredState - type: string - - jsonPath: .status.state - name: currentState - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: VolumeReplication is the Schema for the volumereplications API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VolumeReplicationSpec defines the desired state of VolumeReplication - properties: - dataSource: - description: DataSource represents the object associated with the volume - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - replicationState: - description: ReplicationState represents the replication operation to be performed on the volume. 
Supported operations are "primary", "secondary" and "resync" - enum: - - primary - - secondary - - resync - type: string - volumeReplicationClass: - description: VolumeReplicationClass is the VolumeReplicationClass name for this VolumeReplication resource - type: string - required: - - dataSource - - replicationState - - volumeReplicationClass - type: object - status: - description: VolumeReplicationStatus defines the observed state of VolumeReplication - properties: - conditions: - description: Conditions are the list of conditions and their status. - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - lastCompletionTime: - format: date-time - type: string - lastStartTime: - format: date-time - type: string - message: - type: string - observedGeneration: - description: observedGeneration is the last generation change the operator has dealt with - format: int64 - type: integer - state: - description: State captures the latest state of the replication operation - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: volumes.rook.io -spec: - group: rook.io - names: - kind: Volume - listKind: VolumeList - plural: volumes - shortNames: - - rv - singular: volume - scope: Namespaced - versions: - - name: v1alpha2 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - attachments: - items: - properties: - clusterName: - type: string - mountDir: - type: string - node: - type: string - podName: - type: string - podNamespace: - type: string - readOnly: - type: boolean - required: - - clusterName - - mountDir - - node - - podName - - podNamespace - - readOnly - type: object - type: array - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - required: - - attachments - - metadata - type: object - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.py b/cluster/examples/kubernetes/ceph/create-external-cluster-resources.py deleted file mode 100644 index e2f4b1fe9..000000000 --- a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.py +++ /dev/null @@ -1,1133 +0,0 @@ -''' -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-''' - -import errno -import sys -import json -import argparse -import unittest -import re -import requests -import subprocess -from os import linesep as LINESEP -from os import path - -# backward compatibility with 2.x -try: - ModuleNotFoundError -except: - ModuleNotFoundError = ImportError - -try: - import rados -except ModuleNotFoundError as noModErr: - print("Error: %s\nExiting the script..." % noModErr) - sys.exit(1) - -try: - # for 2.7.x - from StringIO import StringIO -except ModuleNotFoundError: - # for 3.x - from io import StringIO - -try: - # for 2.7.x - from urlparse import urlparse -except ModuleNotFoundError: - # for 3.x - from urllib.parse import urlparse - - -class ExecutionFailureException(Exception): - pass - -################################################ -################## DummyRados ################## -################################################ -# this is mainly for testing and could be used where 'rados' is not available - - -class DummyRados(object): - def __init__(self): - self.return_val = 0 - self.err_message = '' - self.state = 'connected' - self.cmd_output_map = {} - self.cmd_names = {} - self._init_cmd_output_map() - self.dummy_host_ip_map = {} - - def _init_cmd_output_map(self): - json_file_name = 'test-data/ceph-status-out' - script_dir = path.abspath(path.dirname(__file__)) - ceph_status_str = "" - with open(path.join(script_dir, json_file_name), 'r') as json_file: - ceph_status_str = json_file.read() - self.cmd_names['fs ls'] = '''{"format": "json", "prefix": "fs ls"}''' - self.cmd_names['quorum_status'] = '''{"format": "json", "prefix": "quorum_status"}''' - self.cmd_names['caps_change_default_pool_prefix'] = '''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}''' - self.cmd_names['mgr services'] = '''{"format": "json", "prefix": "mgr services"}''' - # all the commands and their output - self.cmd_output_map[self.cmd_names['fs ls'] - ] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-data0"]}]''' - self.cmd_output_map[self.cmd_names['quorum_status']] = '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}''' - self.cmd_output_map[self.cmd_names['mgr services']] = '''{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}''' - self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x 
pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]''' - self.cmd_output_map['''{"caps": ["mon", "profile rbd", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd","osd":"profile rbd"}}]''' - self.cmd_output_map['''{"caps": ["mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd","osd":"profile rbd"}}]''' - self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs *=*"}}]''' - self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs metadata=*"}}]''' - self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]''' - self.cmd_output_map['''{"format": "json", "prefix": "mgr services"}'''] = '''{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}''' - self.cmd_output_map['''{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}'''] = '''{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}''' - self.cmd_output_map['''{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x 
pool=default.rgw.buckets.index"}}]''' - self.cmd_output_map[self.cmd_names['caps_change_default_pool_prefix']] = '''[{}]''' - self.cmd_output_map['{"format": "json", "prefix": "status"}'] = ceph_status_str - - def shutdown(self): - pass - - def get_fsid(self): - return 'af4e1673-0b72-402d-990a-22d2919d0f1c' - - def conf_read_file(self): - pass - - def connect(self): - pass - - def pool_exists(self, pool_name): - return True - - def mon_command(self, cmd, out): - json_cmd = json.loads(cmd) - json_cmd_str = json.dumps(json_cmd, sort_keys=True) - cmd_output = self.cmd_output_map[json_cmd_str] - return self.return_val, \ - cmd_output, \ - "{}".format(self.err_message).encode('utf-8') - - def _convert_hostname_to_ip(self, host_name): - ip_reg_x = re.compile(r'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}') - # if provided host is directly an IP address, return the same - if ip_reg_x.match(host_name): - return host_name - import random - host_ip = self.dummy_host_ip_map.get(host_name, "") - if not host_ip: - host_ip = "172.9.{}.{}".format( - random.randint(0, 254), random.randint(0, 254)) - self.dummy_host_ip_map[host_name] = host_ip - del random - return host_ip - - @classmethod - def Rados(conffile=None): - return DummyRados() - - -class RadosJSON: - EXTERNAL_USER_NAME = "client.healthchecker" - EXTERNAL_RGW_ADMIN_OPS_USER_NAME = "rgw-admin-ops-user" - EMPTY_OUTPUT_LIST = "Empty output list" - DEFAULT_RGW_POOL_PREFIX = "default" - DEFAULT_MONITORING_ENDPOINT_PORT = "9283" - - @classmethod - def gen_arg_parser(cls, args_to_parse=None): - argP = argparse.ArgumentParser() - - common_group = argP.add_argument_group('common') - common_group.add_argument("--verbose", "-v", - action='store_true', default=False) - common_group.add_argument("--ceph-conf", "-c", - help="Provide a ceph conf file.", type=str) - common_group.add_argument("--run-as-user", "-u", default="", type=str, - help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'") - common_group.add_argument("--cluster-name", default="openshift-storage", - help="Ceph cluster name") - common_group.add_argument("--namespace", default="", - help="Namespace where CephCluster is running") - common_group.add_argument("--rgw-pool-prefix", default="", - help="RGW Pool prefix") - - output_group = argP.add_argument_group('output') - output_group.add_argument("--format", "-t", choices=["json", "bash"], - default='json', help="Provides the output format (json | bash)") - output_group.add_argument("--output", "-o", default="", - help="Output will be stored into the provided file") - output_group.add_argument("--cephfs-filesystem-name", default="", - help="Provides the name of the Ceph filesystem") - output_group.add_argument("--cephfs-data-pool-name", default="", - help="Provides the name of the cephfs data pool") - output_group.add_argument("--rbd-data-pool-name", default="", required=False, - help="Provides the name of the RBD datapool") - output_group.add_argument("--rgw-endpoint", default="", required=False, - help="Rados GateWay endpoint (in : format)") - output_group.add_argument("--rgw-tls-cert-path", default="", required=False, - help="Rados GateWay endpoint TLS certificate") - output_group.add_argument("--rgw-skip-tls", required=False, default=False, - help="Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED") - output_group.add_argument("--monitoring-endpoint", default="", required=False, - help="Ceph Manager prometheus exporter endpoints (comma separated list of entries of active 
and standby mgrs)") - output_group.add_argument("--monitoring-endpoint-port", default="", required=False, - help="Ceph Manager prometheus exporter port") - - upgrade_group = argP.add_argument_group('upgrade') - upgrade_group.add_argument("--upgrade", action='store_true', default=False, - help="Upgrades the 'user' with all the permissions needed for the new cluster version") - - if args_to_parse: - assert type(args_to_parse) == list, \ - "Argument to 'gen_arg_parser' should be a list" - else: - args_to_parse = sys.argv[1:] - return argP.parse_args(args_to_parse) - - def validate_rgw_endpoint_tls_cert(self): - if self._arg_parser.rgw_tls_cert_path: - with open(self._arg_parser.rgw_tls_cert_path, encoding='utf8') as f: - contents = f.read() - return contents.rstrip() - - def _check_conflicting_options(self): - if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name: - raise ExecutionFailureException( - "Either '--upgrade' or '--rbd-data-pool-name ' should be specified") - if self._arg_parser.upgrade and self._arg_parser.rbd_data_pool_name: - raise ExecutionFailureException( - "Both '--upgrade' and '--rbd-data-pool-name ' should not be specified, choose only one") - # a user name must be provided while using '--upgrade' option - if not self._arg_parser.run_as_user and self._arg_parser.upgrade: - raise ExecutionFailureException( - "Please provide an existing user-name through '--run-as-user' (or '-u') flag while upgrading") - - def _invalid_endpoint(self, endpoint_str): - try: - ipv4, port = endpoint_str.split(':') - except ValueError: - raise ExecutionFailureException( - "Not a proper endpoint: {}, :, format is expected".format(endpoint_str)) - ipParts = ipv4.split('.') - if len(ipParts) != 4: - raise ExecutionFailureException( - "Not a valid IP address: {}".format(ipv4)) - for eachPart in ipParts: - if not eachPart.isdigit(): - raise ExecutionFailureException( - "IP address parts should be numbers: {}".format(ipv4)) - intPart = int(eachPart) - if intPart < 0 or intPart > 254: - raise ExecutionFailureException( - "Out of range IP addresses: {}".format(ipv4)) - if not port.isdigit(): - raise ExecutionFailureException("Port not valid: {}".format(port)) - intPort = int(port) - if intPort < 1 or intPort > 2**16-1: - raise ExecutionFailureException( - "Out of range port number: {}".format(port)) - return False - - def endpoint_dial(self, endpoint_str, timeout=3, cert=None): - # if the 'cluster' instance is a dummy one, - # don't try to reach out to the endpoint - if isinstance(self.cluster, DummyRados): - return - protocols = ["http", "https"] - for prefix in protocols: - try: - ep = "{}://{}".format(prefix, endpoint_str) - # If verify is set to a path to a directory, - # the directory must have been processed using the c_rehash utility supplied with OpenSSL. 
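# Both --rgw-endpoint and the monitoring endpoint are plain "<ip>:<port>" pairs
# (for example "10.10.212.133:8000"); _invalid_endpoint() above accepts only four
# numeric octets in the 0-254 range and a port in 1-65535, and raises
# ExecutionFailureException otherwise. The branches below then probe the endpoint
# over http and https with requests.head(): certificate verification is skipped
# only when --rgw-skip-tls is set, and the value returned by
# validate_rgw_endpoint_tls_cert() is handed to requests' verify parameter when a
# certificate path was supplied.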
- if prefix == "https" and cert and self._arg_parser.rgw_skip_tls: - r = requests.head(ep, timeout=timeout, verify=False) - elif prefix == "https" and cert: - r = requests.head(ep, timeout=timeout, verify=cert) - else: - r = requests.head(ep, timeout=timeout) - if r.status_code == 200: - return - except: - continue - raise ExecutionFailureException( - "unable to connect to endpoint: {}".format(endpoint_str)) - - def __init__(self, arg_list=None): - self.out_map = {} - self._excluded_keys = set() - self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list) - self._check_conflicting_options() - self.run_as_user = self._arg_parser.run_as_user - self.output_file = self._arg_parser.output - self.ceph_conf = self._arg_parser.ceph_conf - self.MIN_USER_CAP_PERMISSIONS = { - 'mgr': 'allow command config', - 'mon': 'allow r, allow command quorum_status, allow command version', - 'osd': "allow rwx pool={0}.rgw.meta, " + - "allow r pool=.rgw.root, " + - "allow rw pool={0}.rgw.control, " + - "allow rx pool={0}.rgw.log, " + - "allow x pool={0}.rgw.buckets.index" - } - # if user not provided, give a default user - if not self.run_as_user and not self._arg_parser.upgrade: - self.run_as_user = self.EXTERNAL_USER_NAME - if not self._arg_parser.rgw_pool_prefix and not self._arg_parser.upgrade: - self._arg_parser.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX - if self.ceph_conf: - self.cluster = rados.Rados(conffile=self.ceph_conf) - else: - self.cluster = rados.Rados() - self.cluster.conf_read_file() - self.cluster.connect() - - def shutdown(self): - if self.cluster.state == "connected": - self.cluster.shutdown() - - def get_fsid(self): - return str(self.cluster.get_fsid()) - - def _common_cmd_json_gen(self, cmd_json): - cmd = json.dumps(cmd_json, sort_keys=True) - ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b'') - if self._arg_parser.verbose: - print("Command Input: {}".format(cmd)) - print("Return Val: {}\nCommand Output: {}\nError Message: {}\n----------\n".format( - ret_val, cmd_out, err_msg)) - json_out = {} - # if there is no error (i.e; ret_val is ZERO) and 'cmd_out' is not empty - # then convert 'cmd_out' to a json output - if ret_val == 0 and cmd_out: - json_out = json.loads(cmd_out) - return ret_val, json_out, err_msg - - def get_ceph_external_mon_data(self): - cmd_json = {"prefix": "quorum_status", "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, - if ret_val != 0 or len(json_out) == 0: - raise ExecutionFailureException( - "'quorum_status' command failed.\n" + - "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) - q_leader_name = json_out['quorum_leader_name'] - q_leader_details = {} - q_leader_matching_list = [l for l in json_out['monmap']['mons'] - if l['name'] == q_leader_name] - if len(q_leader_matching_list) == 0: - raise ExecutionFailureException("No matching 'mon' details found") - q_leader_details = q_leader_matching_list[0] - # get the address vector of the quorum-leader - q_leader_addrvec = q_leader_details.get( - 'public_addrs', {}).get('addrvec', []) - # if the quorum-leader has only one address in the address-vector - # and it is of type 'v2' (ie; with :3300), - # raise an exception to make user aware that - # they have to enable 'v1' (ie; with :6789) type as well - if len(q_leader_addrvec) == 1 and q_leader_addrvec[0]['type'] == 'v2': - raise ExecutionFailureException( - "Only 'v2' address type is enabled, user should also enable 'v1' type as well") - ip_port = 
str(q_leader_details['public_addr'].split('/')[0]) - return "{}={}".format(str(q_leader_name), ip_port) - - def _join_host_port(self, endpoint, port): - port = "{}".format(port) - # regex to check the given endpoint is enclosed in square brackets - ipv6_regx = re.compile(r'^\[[^]]*\]$') - # endpoint has ':' in it and if not (already) enclosed in square brackets - if endpoint.count(':') and not ipv6_regx.match(endpoint): - endpoint = '[{}]'.format(endpoint) - if not port: - return endpoint - return ':'.join([endpoint, port]) - - def _convert_hostname_to_ip(self, host_name): - # if 'cluster' instance is a dummy type, - # call the dummy instance's "convert" method - if not host_name: - raise ExecutionFailureException("Empty hostname provided") - if isinstance(self.cluster, DummyRados): - return self.cluster._convert_hostname_to_ip(host_name) - import socket - ip = socket.gethostbyname(host_name) - del socket - return ip - - def get_active_and_standby_mgrs(self): - monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port - monitoring_endpoint_ip = self._arg_parser.monitoring_endpoint - standby_mgrs = [] - if not monitoring_endpoint_ip: - cmd_json = {"prefix": "status", "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, - if ret_val != 0 or len(json_out) == 0: - raise ExecutionFailureException( - "'mgr services' command failed.\n" + - "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) - monitoring_endpoint = json_out.get('mgrmap', {}).get( - 'services', {}).get('prometheus', '') - if not monitoring_endpoint: - raise ExecutionFailureException( - "'prometheus' service not found, is the exporter enabled?'.\n") - # now check the stand-by mgr-s - standby_arr = json_out.get('mgrmap', {}).get('standbys', []) - for each_standby in standby_arr: - if 'name' in each_standby.keys(): - standby_mgrs.append(each_standby['name']) - try: - parsed_endpoint = urlparse(monitoring_endpoint) - except ValueError: - raise ExecutionFailureException( - "invalid endpoint: {}".format(monitoring_endpoint)) - monitoring_endpoint_ip = parsed_endpoint.hostname - if not monitoring_endpoint_port: - monitoring_endpoint_port = "{}".format(parsed_endpoint.port) - - # if monitoring endpoint port is not set, put a default mon port - if not monitoring_endpoint_port: - monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT - - try: - failed_ip = monitoring_endpoint_ip - monitoring_endpoint_ip = self._convert_hostname_to_ip( - monitoring_endpoint_ip) - # collect all the 'stand-by' mgr ips - mgr_ips = [] - for each_standby_mgr in standby_mgrs: - failed_ip = each_standby_mgr - mgr_ips.append( - self._convert_hostname_to_ip(each_standby_mgr)) - except: - raise ExecutionFailureException( - "Conversion of host: {} to IP failed. 
" - "Please enter the IP addresses of all the ceph-mgrs with the '--monitoring-endpoint' flag".format(failed_ip)) - monitoring_endpoint = self._join_host_port( - monitoring_endpoint_ip, monitoring_endpoint_port) - self._invalid_endpoint(monitoring_endpoint) - self.endpoint_dial(monitoring_endpoint) - - # add the validated active mgr IP into the first index - mgr_ips.insert(0, monitoring_endpoint_ip) - all_mgr_ips_str = ",".join(mgr_ips) - return all_mgr_ips_str, monitoring_endpoint_port - - def create_cephCSIKeyring_cephFSProvisioner(self): - ''' - command: ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*' - ''' - cmd_json = {"prefix": "auth get-or-create", - "entity": "client.csi-cephfs-provisioner", - "caps": ["mon", "allow r", "mgr", "allow rw", - "osd", "allow rw tag cephfs metadata=*"], - "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, - if ret_val != 0 or len(json_out) == 0: - raise ExecutionFailureException( - "'auth get-or-create client.csi-cephfs-provisioner' command failed.\n" + - "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) - return str(json_out[0]['key']) - - def create_cephCSIKeyring_cephFSNode(self): - cmd_json = {"prefix": "auth get-or-create", - "entity": "client.csi-cephfs-node", - "caps": ["mon", "allow r", - "mgr", "allow rw", - "osd", "allow rw tag cephfs *=*", - "mds", "allow rw"], - "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, - if ret_val != 0 or len(json_out) == 0: - raise ExecutionFailureException( - "'auth get-or-create client.csi-cephfs-node' command failed.\n" + - "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) - return str(json_out[0]['key']) - - def create_cephCSIKeyring_RBDProvisioner(self): - cmd_json = {"prefix": "auth get-or-create", - "entity": "client.csi-rbd-provisioner", - "caps": ["mon", "profile rbd", - "mgr", "allow rw", - "osd", "profile rbd"], - "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, - if ret_val != 0 or len(json_out) == 0: - raise ExecutionFailureException( - "'auth get-or-create client.csi-rbd-provisioner' command failed.\n" + - "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) - return str(json_out[0]['key']) - - def get_cephfs_data_pool_details(self): - cmd_json = {"prefix": "fs ls", "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, report an error - if ret_val != 0: - # if fs and data_pool arguments are not set, silently return - if self._arg_parser.cephfs_filesystem_name == "" and self._arg_parser.cephfs_data_pool_name == "": - return - # if user has provided any of the - # '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments, - # raise an exception as we are unable to verify the args - raise ExecutionFailureException( - "'fs ls' ceph call failed with error: {}".format(err_msg)) - - matching_json_out = {} - # if '--cephfs-filesystem-name' argument is provided, - # check whether the provided filesystem-name exists or not - if self._arg_parser.cephfs_filesystem_name: - # get the matching list - matching_json_out_list = [matched for matched in json_out - if str(matched['name']) == self._arg_parser.cephfs_filesystem_name] - # unable to find a matching fs-name, raise an 
error - if len(matching_json_out_list) == 0: - raise ExecutionFailureException( - ("Filesystem provided, '{}', " + - "is not found in the fs-list: '{}'").format( - self._arg_parser.cephfs_filesystem_name, - [str(x['name']) for x in json_out])) - matching_json_out = matching_json_out_list[0] - # if cephfs filesystem name is not provided, - # try to get a default fs name by doing the following - else: - # a. check if there is only one filesystem is present - if len(json_out) == 1: - matching_json_out = json_out[0] - # b. or else, check if data_pool name is provided - elif self._arg_parser.cephfs_data_pool_name: - # and if present, check whether there exists a fs which has the data_pool - for eachJ in json_out: - if self._arg_parser.cephfs_data_pool_name in eachJ['data_pools']: - matching_json_out = eachJ - break - # if there is no matching fs exists, that means provided data_pool name is invalid - if not matching_json_out: - raise ExecutionFailureException( - "Provided data_pool name, {}, does not exists".format( - self._arg_parser.cephfs_data_pool_name)) - # c. if nothing is set and couldn't find a default, - else: - # just return silently - return - - if matching_json_out: - self._arg_parser.cephfs_filesystem_name = str( - matching_json_out['name']) - - if type(matching_json_out['data_pools']) == list: - # if the user has already provided data-pool-name, - # through --cephfs-data-pool-name - if self._arg_parser.cephfs_data_pool_name: - # if the provided name is not matching with the one in the list - if self._arg_parser.cephfs_data_pool_name not in matching_json_out['data_pools']: - raise ExecutionFailureException( - "{}: '{}', {}: {}".format( - "Provided data-pool-name", - self._arg_parser.cephfs_data_pool_name, - "doesn't match from the data-pools' list", - [str(x) for x in matching_json_out['data_pools']])) - # if data_pool name is not provided, - # then try to find a default data pool name - else: - # if no data_pools exist, silently return - if len(matching_json_out['data_pools']) == 0: - return - self._arg_parser.cephfs_data_pool_name = str( - matching_json_out['data_pools'][0]) - # if there are more than one 'data_pools' exist, - # then warn the user that we are using the selected name - if len(matching_json_out['data_pools']) > 1: - print("{}: {}\n{}: '{}'\n".format( - "WARNING: Multiple data pools detected", - [str(x) for x in matching_json_out['data_pools']], - "Using the data-pool", - self._arg_parser.cephfs_data_pool_name)) - - def create_cephCSIKeyring_RBDNode(self): - cmd_json = {"prefix": "auth get-or-create", - "entity": "client.csi-rbd-node", - "caps": ["mon", "profile rbd", - "osd", "profile rbd"], - "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, - if ret_val != 0 or len(json_out) == 0: - raise ExecutionFailureException( - "'auth get-or-create client.csi-rbd-node' command failed\n" + - "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) - return str(json_out[0]['key']) - - def create_checkerKey(self): - cmd_json = {"prefix": "auth get-or-create", - "entity": self.run_as_user, - "caps": ["mon", self.MIN_USER_CAP_PERMISSIONS['mon'], - "mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'], - "osd", self.MIN_USER_CAP_PERMISSIONS['osd'].format(self._arg_parser.rgw_pool_prefix)], - "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, - if ret_val != 0 or len(json_out) == 0: - raise ExecutionFailureException( - "'auth 
get-or-create {}' command failed\n".format(self.run_as_user) + - "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) - return str(json_out[0]['key']) - - def get_ceph_dashboard_link(self): - cmd_json = {"prefix": "mgr services", "format": "json"} - ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json) - # if there is an unsuccessful attempt, - if ret_val != 0 or len(json_out) == 0: - return None - if not 'dashboard' in json_out: - return None - return json_out['dashboard'] - - def create_rgw_admin_ops_user(self): - cmd = ['radosgw-admin', 'user', 'create', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, '--display-name', - 'Rook RGW Admin Ops user', '--caps', 'buckets=*;users=*;usage=read;metadata=read;zone=read'] - try: - output = subprocess.check_output(cmd, - stderr=subprocess.PIPE) - except subprocess.CalledProcessError as exec: - # if the user already exists, we just query it - if exec.returncode == errno.EEXIST: - cmd = ['radosgw-admin', 'user', 'info', - '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME - ] - try: - output = subprocess.check_output(cmd, - stderr=subprocess.PIPE) - except subprocess.CalledProcessError as exec: - err_msg = "failed to execute command %s. Output: %s. Code: %s. Error: %s" % ( - cmd, exec.output, exec.returncode, exec.stderr) - raise Exception(err_msg) - else: - err_msg = "failed to execute command %s. Output: %s. Code: %s. Error: %s" % ( - cmd, exec.output, exec.returncode, exec.stderr) - raise Exception(err_msg) - - jsonoutput = json.loads(output) - return jsonoutput["keys"][0]['access_key'], jsonoutput["keys"][0]['secret_key'] - - def _gen_output_map(self): - if self.out_map: - return - pools_to_validate = [self._arg_parser.rbd_data_pool_name] - # if rgw_endpoint is provided, validate it - if self._arg_parser.rgw_endpoint: - self._invalid_endpoint(self._arg_parser.rgw_endpoint) - self.endpoint_dial(self._arg_parser.rgw_endpoint, - cert=self.validate_rgw_endpoint_tls_cert()) - rgw_pool_to_validate = ["{0}.rgw.meta".format(self._arg_parser.rgw_pool_prefix), - ".rgw.root", - "{0}.rgw.control".format( - self._arg_parser.rgw_pool_prefix), - "{0}.rgw.log".format( - self._arg_parser.rgw_pool_prefix)] - pools_to_validate.extend(rgw_pool_to_validate) - - for pool in pools_to_validate: - if not self.cluster.pool_exists(pool): - raise ExecutionFailureException( - "The provided pool, '{}', does not exist".format(pool)) - self._excluded_keys.add('CLUSTER_NAME') - self.get_cephfs_data_pool_details() - self.out_map['NAMESPACE'] = self._arg_parser.namespace - self.out_map['CLUSTER_NAME'] = self._arg_parser.cluster_name - self.out_map['ROOK_EXTERNAL_FSID'] = self.get_fsid() - self.out_map['ROOK_EXTERNAL_USERNAME'] = self.run_as_user - self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'] = self.get_ceph_external_mon_data() - self.out_map['ROOK_EXTERNAL_USER_SECRET'] = self.create_checkerKey() - self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK'] = self.get_ceph_dashboard_link() - self.out_map['CSI_RBD_NODE_SECRET_SECRET'] = self.create_cephCSIKeyring_RBDNode() - self.out_map['CSI_RBD_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_RBDProvisioner() - self.out_map['CEPHFS_POOL_NAME'] = self._arg_parser.cephfs_data_pool_name - self.out_map['CEPHFS_FS_NAME'] = self._arg_parser.cephfs_filesystem_name - self.out_map['CSI_CEPHFS_NODE_SECRET'] = '' - self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = '' - # create CephFS node and provisioner keyring only when MDS exists - if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']: - 
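# Both CephFS keyrings are generated together here; without a CephFS filesystem
# (and therefore no MDS) they remain empty strings, and gen_json_out()/gen_shell_out()
# below simply omit the corresponding secrets from their output.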
self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode( - ) - self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner() - self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint - self.out_map['RGW_TLS_CERT'] = '' - self.out_map['MONITORING_ENDPOINT'], \ - self.out_map['MONITORING_ENDPOINT_PORT'] = self.get_active_and_standby_mgrs() - self.out_map['RBD_POOL_NAME'] = self._arg_parser.rbd_data_pool_name - self.out_map['RGW_POOL_PREFIX'] = self._arg_parser.rgw_pool_prefix - if self._arg_parser.rgw_endpoint: - self.out_map['ACCESS_KEY'], self.out_map['SECRET_KEY'] = self.create_rgw_admin_ops_user() - if self._arg_parser.rgw_tls_cert_path: - self.out_map['RGW_TLS_CERT'] = self.validate_rgw_endpoint_tls_cert() - - def gen_shell_out(self): - self._gen_output_map() - shOutIO = StringIO() - for k, v in self.out_map.items(): - if v and k not in self._excluded_keys: - shOutIO.write('export {}={}{}'.format(k, v, LINESEP)) - shOut = shOutIO.getvalue() - shOutIO.close() - return shOut - - def gen_json_out(self): - self._gen_output_map() - json_out = [ - { - "name": "rook-ceph-mon-endpoints", - "kind": "ConfigMap", - "data": { - "data": self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'], - "maxMonId": "0", - "mapping": "{}" - } - }, - { - "name": "rook-ceph-mon", - "kind": "Secret", - "data": { - "admin-secret": "admin-secret", - "fsid": self.out_map['ROOK_EXTERNAL_FSID'], - "mon-secret": "mon-secret" - }, - }, - { - "name": "rook-ceph-operator-creds", - "kind": "Secret", - "data": { - "userID": self.out_map['ROOK_EXTERNAL_USERNAME'], - "userKey": self.out_map['ROOK_EXTERNAL_USER_SECRET'] - } - }, - { - "name": "rook-csi-rbd-node", - "kind": "Secret", - "data": { - "userID": 'csi-rbd-node', - "userKey": self.out_map['CSI_RBD_NODE_SECRET_SECRET'] - } - }, - { - "name": "ceph-rbd", - "kind": "StorageClass", - "data": { - "pool": self.out_map['RBD_POOL_NAME'] - } - }, - { - "name": "monitoring-endpoint", - "kind": "CephCluster", - "data": { - "MonitoringEndpoint": self.out_map['MONITORING_ENDPOINT'], - "MonitoringPort": self.out_map['MONITORING_ENDPOINT_PORT'] - } - } - ] - - # if 'ROOK_EXTERNAL_DASHBOARD_LINK' exists, then only add 'rook-ceph-dashboard-link' Secret - if self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK']: - json_out.append({ - "name": "rook-ceph-dashboard-link", - "kind": "Secret", - "data": { - "userID": 'ceph-dashboard-link', - "userKey": self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK'] - } - }) - # if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret - if self.out_map['CSI_RBD_PROVISIONER_SECRET']: - json_out.append({ - "name": "rook-csi-rbd-provisioner", - "kind": "Secret", - "data": { - "userID": 'csi-rbd-provisioner', - "userKey": self.out_map['CSI_RBD_PROVISIONER_SECRET'] - }, - }) - # if 'CSI_CEPHFS_PROVISIONER_SECRET' exists, then only add 'rook-csi-cephfs-provisioner' Secret - if self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']: - json_out.append({ - "name": "rook-csi-cephfs-provisioner", - "kind": "Secret", - "data": { - "adminID": 'csi-cephfs-provisioner', - "adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] - }, - }) - # if 'CSI_CEPHFS_NODE_SECRET' exists, then only add 'rook-csi-cephfs-node' Secret - if self.out_map['CSI_CEPHFS_NODE_SECRET']: - json_out.append({ - "name": "rook-csi-cephfs-node", - "kind": "Secret", - "data": { - "adminID": 'csi-cephfs-node', - "adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET'] - } - }) - # if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass - if 
self.out_map['CEPHFS_FS_NAME']: - json_out.append({ - "name": "cephfs", - "kind": "StorageClass", - "data": { - "fsName": self.out_map['CEPHFS_FS_NAME'], - "pool": self.out_map['CEPHFS_POOL_NAME'] - } - }) - # if 'RGW_ENDPOINT' exists, then only add 'ceph-rgw' StorageClass - if self.out_map['RGW_ENDPOINT']: - json_out.append({ - "name": "ceph-rgw", - "kind": "StorageClass", - "data": { - "endpoint": self.out_map['RGW_ENDPOINT'], - "poolPrefix": self.out_map['RGW_POOL_PREFIX'] - } - }) - json_out.append( - { - "name": "rgw-admin-ops-user", - "kind": "Secret", - "data": { - "accessKey": self.out_map['ACCESS_KEY'], - "secretKey": self.out_map['SECRET_KEY'] - } - }) - # if 'RGW_TLS_CERT' exists, then only add the "ceph-rgw-tls-cert" secret - if self.out_map['RGW_TLS_CERT']: - json_out.append({ - "name": "ceph-rgw-tls-cert", - "kind": "Secret", - "data": { - "cert": self.out_map['RGW_TLS_CERT'], - } - }) - return json.dumps(json_out)+LINESEP - - def upgrade_user_permissions(self): - # check whether the given user exists or not - cmd_json = {"prefix": "auth get", "entity": "{}".format( - self.run_as_user), "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - if ret_val != 0 or len(json_out) == 0: - raise ExecutionFailureException("'auth get {}' command failed.\n".format(self.run_as_user) + - "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) - j_first = json_out[0] - existing_caps = j_first['caps'] - osd_cap = "osd" - cap_keys = ["mon", "mgr", "osd"] - for eachCap in cap_keys: - min_cap_values = self.MIN_USER_CAP_PERMISSIONS.get(eachCap, '') - cur_cap_values = existing_caps.get(eachCap, '') - # detect rgw-pool-prefix - if eachCap == osd_cap: - # if directly provided through '--rgw-pool-prefix' argument, use it - if self._arg_parser.rgw_pool_prefix: - min_cap_values = min_cap_values.format( - self._arg_parser.rgw_pool_prefix) - # or else try to detect one from the existing/current osd cap values - else: - rc = re.compile(r' pool=([^.]+)\.rgw\.[^ ]*') - # 'findall()' method will give a list of prefixes - # and 'set' will eliminate any duplicates - cur_rgw_pool_prefix_list = list( - set(rc.findall(cur_cap_values))) - if len(cur_rgw_pool_prefix_list) != 1: - raise ExecutionFailureException( - "Unable to determine 'rgw-pool-prefx'. 
Please provide one with '--rgw-pool-prefix' flag") - min_cap_values = min_cap_values.format( - cur_rgw_pool_prefix_list[0]) - cur_cap_perm_list = [x.strip() - for x in cur_cap_values.split(',') if x.strip()] - min_cap_perm_list = [x.strip() - for x in min_cap_values.split(',') if x.strip()] - min_cap_perm_list.extend(cur_cap_perm_list) - # eliminate duplicates without using 'set' - # set re-orders items in the list and we have to keep the order - new_cap_perm_list = [] - [new_cap_perm_list.append( - x) for x in min_cap_perm_list if x not in new_cap_perm_list] - existing_caps[eachCap] = ", ".join(new_cap_perm_list) - cmd_json = {"prefix": "auth caps", - "entity": self.run_as_user, - "caps": ["mon", existing_caps["mon"], - "mgr", existing_caps["mgr"], - "osd", existing_caps["osd"]], - "format": "json"} - ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) - if ret_val != 0: - raise ExecutionFailureException("'auth caps {}' command failed.\n".format(self.run_as_user) + - "Error: {}".format(err_msg)) - print("Updated user, {}, successfully.".format(self.run_as_user)) - - def main(self): - generated_output = '' - if self._arg_parser.upgrade: - self.upgrade_user_permissions() - elif self._arg_parser.format == 'json': - generated_output = self.gen_json_out() - elif self._arg_parser.format == 'bash': - generated_output = self.gen_shell_out() - else: - raise ExecutionFailureException("Unsupported format: {}".format( - self._arg_parser.format)) - print('{}'.format(generated_output)) - if self.output_file and generated_output: - fOut = open(self.output_file, 'w') - fOut.write(generated_output) - fOut.close() - - -################################################ -##################### MAIN ##################### -################################################ -if __name__ == '__main__': - rjObj = RadosJSON() - try: - rjObj.main() - except ExecutionFailureException as err: - print("Execution Failed: {}".format(err)) - raise err - except KeyError as kErr: - print("KeyError: %s", kErr) - except OSError as osErr: - print("Error while trying to output the data: {}".format(osErr)) - finally: - rjObj.shutdown() - - -################################################ -##################### TEST ##################### -################################################ -# inorder to test the package, -# cd -# python -m unittest --verbose -class TestRadosJSON(unittest.TestCase): - def setUp(self): - print("\nI am in setup") - self.rjObj = RadosJSON(['--rbd-data-pool-name=abc', - '--rgw-endpoint=10.10.212.122:9000', '--format=json']) - # for testing, we are using 'DummyRados' object - self.rjObj.cluster = DummyRados.Rados() - - def tearDown(self): - print("\nI am tearing down the setup\n") - self.rjObj.shutdown() - - def test_method_main_output(self): - print("JSON Output") - self.rjObj._arg_parser.format = "json" - self.rjObj.main() - print("\n\nShell Output") - self.rjObj._arg_parser.format = "bash" - self.rjObj.main() - print("\n\nNon compatible output (--abcd)") - try: - self.rjObj._arg_parser.format = 'abcd' - self.rjObj.main() - self.fail("Function should have thrown an Exception") - except ExecutionFailureException as err: - print("Exception thrown successfully: {}".format(err)) - - def test_method_create_cephCSIKeyring_cephFSProvisioner(self): - csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner() - print("{}".format(csiKeyring)) - - def test_non_zero_return_and_error(self): - self.rjObj.cluster.return_val = 1 - self.rjObj.cluster.err_message = "Dummy Error" - try: - 
self.rjObj.create_checkerKey() - self.fail("Failed to raise an exception, 'ExecutionFailureException'") - except ExecutionFailureException as err: - print("Successfully thrown error.\nError: {}".format(err)) - - def test_multi_filesystem_scenario(self): - cmd_key = self.rjObj.cluster.cmd_names['fs ls'] - cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key] - cmd_json_out = json.loads(cmd_out) - second_fs_details = dict(cmd_json_out[0]) - second_fs_details['name'] += '-2' - cmd_json_out.append(second_fs_details) - self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out) - # multiple filesystem present, - # but no specific '--cephfs-filesystem-name' argument provided - try: - self.rjObj.get_cephfs_data_pool_details() - print("As we are returning silently, no error thrown as expected") - except ExecutionFailureException as err: - self.fail( - "Supposed to get returned silently, but instead error thrown: {}".format(err)) - # pass an existing filesystem name - try: - self.rjObj._arg_parser.cephfs_filesystem_name = second_fs_details['name'] - self.rjObj.get_cephfs_data_pool_details() - except ExecutionFailureException as err: - self.fail("Should not have thrown error: {}".format(err)) - # pass a non-existing filesystem name - try: - self.rjObj._arg_parser.cephfs_filesystem_name += "-non-existing-fs-name" - self.rjObj.get_cephfs_data_pool_details() - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException as err: - print("Successfully thrown error: {}".format(err)) - # empty file-system array - try: - self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps([]) - self.rjObj.get_cephfs_data_pool_details() - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException as err: - print("Successfully thrown error: {}".format(err)) - - def test_multi_data_pool_scenario(self): - cmd_key = self.rjObj.cluster.cmd_names['fs ls'] - cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key] - cmd_json_out = json.loads(cmd_out) - first_fs_details = cmd_json_out[0] - new_data_pool_name = 'myfs-data1' - first_fs_details['data_pools'].append(new_data_pool_name) - print("Modified JSON Cmd Out: {}".format(cmd_json_out)) - self.rjObj._arg_parser.cephfs_data_pool_name = new_data_pool_name - self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out) - self.rjObj.get_cephfs_data_pool_details() - # use a non-existing data-pool-name - bad_data_pool_name = 'myfs-data3' - self.rjObj._arg_parser.cephfs_data_pool_name = bad_data_pool_name - try: - self.rjObj.get_cephfs_data_pool_details() - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException as err: - print("Successfully thrown error: {}".format(err)) - # empty data-pool scenario - first_fs_details['data_pools'] = [] - self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out) - try: - self.rjObj.get_cephfs_data_pool_details() - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException as err: - print("Successfully thrown error: {}".format(err)) - - def test_valid_rgw_endpoint(self): - self.rjObj._invalid_endpoint("10.10.212.133:8000") - # invalid port - try: - self.rjObj._invalid_endpoint("10.10.212.133:238000") - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException as err: - print("Successfully thrown error: {}".format(err)) - # out of range IP - try: - self.rjObj._invalid_endpoint("10.1033.212.133:8000") - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException 
as err: - print("Successfully thrown error: {}".format(err)) - # mal formatted IP - try: - self.rjObj._invalid_endpoint("10.103..212.133:8000") - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException as err: - print("Successfully thrown error: {}".format(err)) - try: - self.rjObj._invalid_endpoint("10.103.212.133::8000") - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException as err: - print("Successfully thrown error: {}".format(err)) - try: - self.rjObj._invalid_endpoint("10.10.103.212.133:8000") - self.fail("An Exception was expected to be thrown") - except ExecutionFailureException as err: - print("Successfully thrown error: {}".format(err)) - - def add_non_default_pool_prefix_cmd(self, non_default_pool_prefix): - json_cmd = json.loads( - self.rjObj.cluster.cmd_names['caps_change_default_pool_prefix']) - cur_osd_caps = json_cmd['caps'][json_cmd['caps'].index('osd') + 1] - new_osd_caps = cur_osd_caps.replace( - 'default.', '{}.'.format(non_default_pool_prefix)) - all_osd_caps = "{}, {}".format(new_osd_caps, cur_osd_caps) - caps_list = [x.strip() for x in all_osd_caps.split(',') if x.strip()] - new_caps_list = [] - [new_caps_list.append(x) for x in caps_list if x not in new_caps_list] - all_osd_caps = ", ".join(new_caps_list) - json_cmd['caps'][json_cmd['caps'].index('osd') + 1] = all_osd_caps - self.rjObj.cluster.cmd_names['caps_change_non_default_pool_prefix'] = json.dumps( - json_cmd) - self.rjObj.cluster.cmd_output_map[ - self.rjObj.cluster.cmd_names['caps_change_non_default_pool_prefix']] = '[{}]' - - def test_upgrade_user_permissions(self): - self.rjObj = RadosJSON( - ['--upgrade', '--run-as-user=client.healthchecker']) - # for testing, we are using 'DummyRados' object - self.rjObj.cluster = DummyRados.Rados() - self.rjObj.main() - self.rjObj = RadosJSON( - ['--upgrade', '--run-as-user=client.healthchecker', '--rgw-pool-prefix=nonDefault']) - self.rjObj.cluster = DummyRados.Rados() - self.add_non_default_pool_prefix_cmd('nonDefault') - self.rjObj.main() - - def test_monitoring_endpoint_validation(self): - self.rjObj = RadosJSON(['--rbd-data-pool-name=abc', '--format=json']) - self.rjObj.cluster = DummyRados.Rados() - - valid_ip_ports = [("10.22.31.131", "3534"), - ("10.177.3.81", ""), ("", ""), ("", "9092")] - for each_ip_port_pair in valid_ip_ports: - # reset monitoring ip and port - self.rjObj._arg_parser.monitoring_endpoint = '' - self.rjObj._arg_parser.monitoring_endpoint_port = '' - new_mon_ip, new_mon_port = each_ip_port_pair - check_ip_val = self.rjObj.cluster.dummy_host_ip_map.get( - new_mon_ip, new_mon_ip) - check_port_val = RadosJSON.DEFAULT_MONITORING_ENDPOINT_PORT - if new_mon_ip: - self.rjObj._arg_parser.monitoring_endpoint = new_mon_ip - if new_mon_port: - check_port_val = new_mon_port - self.rjObj._arg_parser.monitoring_endpoint_port = new_mon_port - # for testing, we are using 'DummyRados' object - mon_ips, mon_port = self.rjObj.get_active_and_standby_mgrs() - mon_ip = mon_ips.split(",")[0] - if check_ip_val and check_ip_val != mon_ip: - self.fail("Expected IP: {}, Returned IP: {}".format( - check_ip_val, mon_ip)) - if check_port_val and check_port_val != mon_port: - self.fail("Expected Port: '{}', Returned Port: '{}'".format( - check_port_val, mon_port)) - print("MonIP: {}, MonPort: {}".format(mon_ip, mon_port)) - - invalid_ip_ports = [("10.22.31.131.43", "5334"), ("", "91943"), - ("10.177.3.81", "90320"), ("", "73422"), ("10.232.12.8", "90922")] - for each_ip_port_pair in invalid_ip_ports: - # reset 
the command-line monitoring args - self.rjObj._arg_parser.monitoring_endpoint = '' - self.rjObj._arg_parser.monitoring_endpoint_port = '' - new_mon_ip, new_mon_port = each_ip_port_pair - if new_mon_ip: - self.rjObj._arg_parser.monitoring_endpoint = new_mon_ip - if new_mon_port: - self.rjObj._arg_parser.monitoring_endpoint_port = new_mon_port - try: - mon_ip, mon_port = self.rjObj.get_active_and_standby_mgrs() - print("[Wrong] MonIP: {}, MonPort: {}".format(mon_ip, mon_port)) - self.fail("An exception was expected") - except ExecutionFailureException as err: - print("Exception thrown successfully: {}".format(err)) diff --git a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh b/cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh deleted file mode 100644 index ad6e31d80..000000000 --- a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/bash -# this script creates all the users/keys on the external cluster -# those keys will be injected via the import-external-cluster.sh once this one is done running -# so you can run import-external-cluster.sh right after this script -# run me like: . cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh -set -e - -############# -# VARIABLES # -############# - -: "${CLIENT_CHECKER_NAME:=client.healthchecker}" -: "${RGW_POOL_PREFIX:=default}" -: "${ns:=rook-ceph-external}" - -############# -# FUNCTIONS # -############# - -function is_available { - command -v "$@" &>/dev/null -} - -function checkEnv() { - if ! is_available ceph; then - echo "'ceph' binary is expected'" - return 1 - fi - - if ! is_available jq; then - echo "'jq' binary is expected'" - return 1 - fi - - if ! ceph -s 1>/dev/null; then - echo "cannot connect to the ceph cluster" - return 1 - fi -} - -function createCheckerKey() { - ROOK_EXTERNAL_USER_SECRET=$(ceph auth get-or-create "$CLIENT_CHECKER_NAME" mon 'allow r, allow command quorum_status' mgr 'allow command config' osd 'allow rwx pool='"$RGW_POOL_PREFIX"'.rgw.meta, allow r pool=.rgw.root, allow rw pool='"$RGW_POOL_PREFIX"'.rgw.control, allow x pool='"$RGW_POOL_PREFIX"'.rgw.buckets.index, allow x pool='"$RGW_POOL_PREFIX"'.rgw.log'|awk '/key =/ { print $3}') - export ROOK_EXTERNAL_USER_SECRET - export ROOK_EXTERNAL_USERNAME=$CLIENT_CHECKER_NAME -} - -function createCephCSIKeyringRBDNode() { - CSI_RBD_NODE_SECRET=$(ceph auth get-or-create client.csi-rbd-node mon 'profile rbd' osd 'profile rbd'|awk '/key =/ { print $3}') - export CSI_RBD_NODE_SECRET -} - -function createCephCSIKeyringRBDProvisioner() { - CSI_RBD_PROVISIONER_SECRET=$(ceph auth get-or-create client.csi-rbd-provisioner mon 'profile rbd' mgr 'allow rw' osd 'profile rbd'|awk '/key =/ { print $3}') - export CSI_RBD_PROVISIONER_SECRET -} - -function createCephCSIKeyringCephFSNode() { - CSI_CEPHFS_NODE_SECRET=$(ceph auth get-or-create client.csi-cephfs-node mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs *=*' mds 'allow rw'|awk '/key =/ { print $3}') - export CSI_CEPHFS_NODE_SECRET -} - -function createCephCSIKeyringCephFSProvisioner() { - CSI_CEPHFS_PROVISIONER_SECRET=$(ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'|awk '/key =/ { print $3}') - export CSI_CEPHFS_PROVISIONER_SECRET -} - -function getFSID() { - ROOK_EXTERNAL_FSID=$(ceph fsid) - export ROOK_EXTERNAL_FSID -} - -function externalMonData() { - ROOK_EXTERNAL_CEPH_MON_DATA=$(ceph mon dump -f json 2>/dev/null|jq --raw-output 
.mons[0].name)=$(ceph mon dump -f json 2>/dev/null|jq --raw-output .mons[0].public_addrs.addrvec[0].addr) - export ROOK_EXTERNAL_CEPH_MON_DATA -} - -function namespace() { - export NAMESPACE=$ns -} - -function createRGWAdminOpsUser() { - createRGWAdminOpsUserKeys=$(radosgw-admin user create --uid rgw-admin-ops-user --display-name "Rook RGW Admin Ops user" --caps "buckets=*;users=*;usage=read;metadata=read;zone=read"|jq --raw-output .keys[0]) - createRGWAdminOpsUserAccessKey=$(echo "$createRGWAdminOpsUserKeys"|jq --raw-output .access_key) - createRGWAdminOpsUserSecretKey=$(echo "$createRGWAdminOpsUserKeys"|jq --raw-output .secret_key) - echo "export RGW_ADMIN_OPS_USER_ACCESS_KEY=$createRGWAdminOpsUserAccessKey" - echo "export RGW_ADMIN_OPS_USER_SECRET_KEY=$createRGWAdminOpsUserSecretKey" -} - - -######## -# MAIN # -######## -checkEnv -createCheckerKey -createCephCSIKeyringRBDNode -createCephCSIKeyringRBDProvisioner -createCephCSIKeyringCephFSNode -createCephCSIKeyringCephFSProvisioner -getFSID -externalMonData -namespace -createRGWAdminOpsUser diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/kube-registry.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/kube-registry.yaml deleted file mode 100644 index 22ca0df82..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/kube-registry.yaml +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: cephfs-pvc - namespace: kube-system -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - storageClassName: rook-cephfs ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kube-registry - namespace: kube-system - labels: - k8s-app: kube-registry - kubernetes.io/cluster-service: "true" -spec: - replicas: 3 - selector: - matchLabels: - k8s-app: kube-registry - template: - metadata: - labels: - k8s-app: kube-registry - kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - imagePullPolicy: Always - resources: - limits: - cpu: 100m - memory: 100Mi - env: - # Configuration reference: https://docs.docker.com/registry/configuration/ - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_HTTP_SECRET - value: "Ple4seCh4ngeThisN0tAVerySecretV4lue" - - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY - value: /var/lib/registry - volumeMounts: - - name: image-store - mountPath: /var/lib/registry - ports: - - containerPort: 5000 - name: registry - protocol: TCP - livenessProbe: - httpGet: - path: / - port: registry - readinessProbe: - httpGet: - path: / - port: registry - volumes: - - name: image-store - persistentVolumeClaim: - claimName: cephfs-pvc - readOnly: false diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pod.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/pod.yaml deleted file mode 100644 index ff208263a..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/pod.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: v1 -kind: Pod -metadata: - name: csicephfs-demo-pod -spec: - containers: - - name: web-server - image: nginx - volumeMounts: - - name: mypvc - mountPath: /var/lib/www/html - volumes: - - name: mypvc - persistentVolumeClaim: - claimName: cephfs-pvc - readOnly: false diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml deleted file mode 100644 index b5762995e..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: v1 -kind: 
PersistentVolumeClaim -metadata: - name: cephfs-pvc-clone -spec: - storageClassName: rook-cephfs - dataSource: - name: cephfs-pvc - kind: PersistentVolumeClaim - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml deleted file mode 100644 index dc078d4ca..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: cephfs-pvc-restore -spec: - storageClassName: rook-cephfs - dataSource: - name: cephfs-pvc-snapshot - kind: VolumeSnapshot - apiGroup: snapshot.storage.k8s.io - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/pvc.yaml deleted file mode 100644 index 0f6addb69..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: cephfs-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: rook-cephfs diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml deleted file mode 100644 index 2c734c511..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# 1.17 <= K8s <= v1.19 -# apiVersion: snapshot.storage.k8s.io/v1beta1 -# K8s >= v1.20 -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshot -metadata: - name: cephfs-pvc-snapshot -spec: - volumeSnapshotClassName: csi-cephfsplugin-snapclass - source: - persistentVolumeClaimName: cephfs-pvc diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml deleted file mode 100644 index 8fc847b68..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# 1.17 <= K8s <= v1.19 -# apiVersion: snapshot.storage.k8s.io/v1beta1 -# K8s >= v1.20 -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshotClass -metadata: - name: csi-cephfsplugin-snapclass -driver: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator -parameters: - # Specify a string that identifies your cluster. Ceph CSI supports any - # unique string. When Ceph CSI is deployed by Rook use the Rook namespace, - # for example "rook-ceph". - clusterID: rook-ceph # namespace:cluster - csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph # namespace:cluster -deletionPolicy: Delete diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml deleted file mode 100644 index 92f1ca8b5..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-cephfs -provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator -parameters: - # clusterID is the namespace where operator is deployed. 
- clusterID: rook-ceph # namespace:cluster - - # CephFS filesystem name into which the volume shall be created - fsName: myfs-ec - - # Ceph pool into which the volume shall be created - # Required for provisionVolume: "true" - pool: myfs-ec-data0 - - # The secrets contain Ceph admin credentials. These are generated automatically by the operator - # in the same namespace as the cluster. - csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster - - # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) - # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse - # or by setting the default mounter explicitly via --volumemounter command-line argument. - # mounter: kernel -reclaimPolicy: Delete -allowVolumeExpansion: true -mountOptions: - # uncomment the following line for debugging - #- debug diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml deleted file mode 100644 index fc8169b64..000000000 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-cephfs -provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator -parameters: - # clusterID is the namespace where operator is deployed. - clusterID: rook-ceph # namespace:cluster - - # CephFS filesystem name into which the volume shall be created - fsName: myfs - - # Ceph pool into which the volume shall be created - # Required for provisionVolume: "true" - pool: myfs-data0 - - # The secrets contain Ceph admin credentials. These are generated automatically by the operator - # in the same namespace as the cluster. - csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster - - # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) - # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse - # or by setting the default mounter explicitly via --volumemounter command-line argument. 
- # mounter: kernel -reclaimPolicy: Delete -allowVolumeExpansion: true -mountOptions: - # uncomment the following line for debugging - #- debug diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pod.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/pod.yaml deleted file mode 100644 index 504ba7896..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/pod.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: v1 -kind: Pod -metadata: - name: csirbd-demo-pod -spec: - containers: - - name: web-server - image: nginx - volumeMounts: - - name: mypvc - mountPath: /var/lib/www/html - volumes: - - name: mypvc - persistentVolumeClaim: - claimName: rbd-pvc - readOnly: false diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml deleted file mode 100644 index d6bb251ec..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rbd-pvc-clone -spec: - storageClassName: rook-ceph-block - dataSource: - name: rbd-pvc - kind: PersistentVolumeClaim - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml deleted file mode 100644 index 42c926f90..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rbd-pvc-restore -spec: - storageClassName: rook-ceph-block - dataSource: - name: rbd-pvc-snapshot - kind: VolumeSnapshot - apiGroup: snapshot.storage.k8s.io - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pvc.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/pvc.yaml deleted file mode 100644 index 516a5aa87..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/pvc.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rbd-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: rook-ceph-block diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml deleted file mode 100644 index 9c27b410d..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# 1.17 <= K8s <= v1.19 -# apiVersion: snapshot.storage.k8s.io/v1beta1 -# K8s >= v1.20 -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshot -metadata: - name: rbd-pvc-snapshot -spec: - volumeSnapshotClassName: csi-rbdplugin-snapclass - source: - persistentVolumeClaimName: rbd-pvc diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml deleted file mode 100644 index 2ed388456..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# 1.17 <= K8s <= v1.19 -# apiVersion: snapshot.storage.k8s.io/v1beta1 -# K8s >= v1.20 -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshotClass -metadata: - name: csi-rbdplugin-snapclass -driver: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator -parameters: - # Specify a string that identifies your cluster. Ceph CSI supports any - # unique string. When Ceph CSI is deployed by Rook use the Rook namespace, - # for example "rook-ceph". 
- clusterID: rook-ceph # namespace:cluster - csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph # namespace:cluster -deletionPolicy: Delete diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml deleted file mode 100644 index c62507ffc..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml +++ /dev/null @@ -1,85 +0,0 @@ -################################################################################################################# -# Create a storage class with a data pool that uses erasure coding for a production environment. -# A metadata pool is created with replication enabled. A minimum of 3 nodes with OSDs are required in this -# example since the default failureDomain is host. -# kubectl create -f storageclass-ec.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicated-metadata-pool - namespace: rook-ceph # namespace:cluster -spec: - replicated: - size: 2 ---- -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: ec-data-pool - namespace: rook-ceph # namespace:cluster -spec: - # Make sure you have enough nodes and OSDs running bluestore to support the replica size or erasure code chunks. - # For the below settings, you need at least 3 OSDs on different nodes (because the `failureDomain` is `host` by default). - erasureCoded: - dataChunks: 2 - codingChunks: 1 ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator -parameters: - # clusterID is the namespace where the rook cluster is running - # If you change this namespace, also change the namespace below where the secret namespaces are defined - clusterID: rook-ceph # namespace:cluster - - # If you want to use erasure coded pool with RBD, you need to create - # two pools. one erasure coded and one replicated. - # You need to specify the replicated pool here in the `pool` parameter, it is - # used for the metadata of the images. - # The erasure coded pool must be set as the `dataPool` parameter below. - dataPool: ec-data-pool - pool: replicated-metadata-pool - - # (optional) mapOptions is a comma-separated list of map options. - # For krbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options - # For nbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options - # mapOptions: lock_on_read,queue_depth=1024 - - # (optional) unmapOptions is a comma-separated list of unmap options. - # For krbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options - # For nbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options - # unmapOptions: force - - # RBD image format. Defaults to "2". - imageFormat: "2" - - # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. - imageFeatures: layering - - # The secrets contain Ceph admin credentials. These are generated automatically by the operator - # in the same namespace as the cluster. 
- csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster - # Specify the filesystem type of the volume. If not specified, csi-provisioner - # will set default as `ext4`. - csi.storage.k8s.io/fstype: ext4 -# uncomment the following to use rbd-nbd as mounter on supported nodes -# **IMPORTANT**: CephCSI v3.4.0 onwards a volume healer functionality is added to reattach -# the PVC to application pod if nodeplugin pod restart. -# Its still in Alpha support. Therefore, this option is not recommended for production use. -#mounter: rbd-nbd -allowVolumeExpansion: true -reclaimPolicy: Delete diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-test.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-test.yaml deleted file mode 100644 index f5cf3b45c..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-test.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph # namespace:cluster -spec: - failureDomain: osd - replicated: - size: 1 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: false - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #targetSizeRatio: .5 ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator -parameters: - # clusterID is the namespace where the rook cluster is running - # If you change this namespace, also change the namespace below where the secret namespaces are defined - clusterID: rook-ceph # namespace:cluster - - # If you want to use erasure coded pool with RBD, you need to create - # two pools. one erasure coded and one replicated. - # You need to specify the replicated pool here in the `pool` parameter, it is - # used for the metadata of the images. - # The erasure coded pool must be set as the `dataPool` parameter below. - #dataPool: ec-data-pool - pool: replicapool - - # RBD image format. Defaults to "2". - imageFormat: "2" - - # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. - imageFeatures: layering - - # The secrets contain Ceph admin credentials. These are generated automatically by the operator - # in the same namespace as the cluster. 
- csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster - # Specify the filesystem type of the volume. If not specified, csi-provisioner - # will set default as `ext4`. - csi.storage.k8s.io/fstype: ext4 -# uncomment the following to use rbd-nbd as mounter on supported nodes -#mounter: rbd-nbd -allowVolumeExpansion: true -reclaimPolicy: Delete diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml deleted file mode 100644 index 98ef451f7..000000000 --- a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #targetSizeRatio: .5 ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.rbd.csi.ceph.com -parameters: - # clusterID is the namespace where the rook cluster is running - # If you change this namespace, also change the namespace below where the secret namespaces are defined - clusterID: rook-ceph # namespace:cluster - - # If you want to use erasure coded pool with RBD, you need to create - # two pools. one erasure coded and one replicated. - # You need to specify the replicated pool here in the `pool` parameter, it is - # used for the metadata of the images. - # The erasure coded pool must be set as the `dataPool` parameter below. - #dataPool: ec-data-pool - pool: replicapool - - # (optional) mapOptions is a comma-separated list of map options. - # For krbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options - # For nbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options - # mapOptions: lock_on_read,queue_depth=1024 - - # (optional) unmapOptions is a comma-separated list of unmap options. - # For krbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options - # For nbd options refer - # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options - # unmapOptions: force - - # RBD image format. Defaults to "2". - imageFormat: "2" - - # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. - imageFeatures: layering - - # The secrets contain Ceph admin credentials. These are generated automatically by the operator - # in the same namespace as the cluster. 
- csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster - # Specify the filesystem type of the volume. If not specified, csi-provisioner - # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock - # in hyperconverged settings where the volume is mounted on the same node as the osds. - csi.storage.k8s.io/fstype: ext4 -# uncomment the following to use rbd-nbd as mounter on supported nodes -# **IMPORTANT**: CephCSI v3.4.0 onwards a volume healer functionality is added to reattach -# the PVC to application pod if nodeplugin pod restart. -# Its still in Alpha support. Therefore, this option is not recommended for production use. -#mounter: rbd-nbd -allowVolumeExpansion: true -reclaimPolicy: Delete diff --git a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml b/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml deleted file mode 100644 index 91a2521cd..000000000 --- a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml +++ /dev/null @@ -1,206 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: csi-cephfsplugin-provisioner - namespace: {{ .Namespace }} -spec: - replicas: {{ .ProvisionerReplicas }} - selector: - matchLabels: - app: csi-cephfsplugin-provisioner - template: - metadata: - labels: - app: csi-cephfsplugin-provisioner - contains: csi-cephfsplugin-metrics - {{ range $key, $value := .CSICephFSPodLabels }} - {{ $key }}: "{{ $value }}" - {{ end }} - spec: - serviceAccountName: rook-csi-cephfs-provisioner-sa - {{ if .ProvisionerPriorityClassName }} - priorityClassName: {{ .ProvisionerPriorityClassName }} - {{ end }} - containers: - - name: csi-attacher - image: {{ .AttacherImage }} - args: - - "--v={{ .LogLevel }}" - - "--csi-address=$(ADDRESS)" - - "--leader-election=true" - - "--timeout=150s" - - "--leader-election-namespace={{ .Namespace }}" - env: - - name: ADDRESS - value: /csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: socket-dir - mountPath: /csi - {{ if .EnableCephFSSnapshotter }} - - name: csi-snapshotter - image: {{ .SnapshotterImage }} - args: - - "--csi-address=$(ADDRESS)" - - "--v={{ .LogLevel }}" - - "--timeout=150s" - - "--leader-election=true" - - "--leader-election-namespace={{ .Namespace }}" - env: - - name: ADDRESS - value: unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: socket-dir - mountPath: /csi - {{ end }} - - name: csi-resizer - image: {{ .ResizerImage }} - args: - - "--csi-address=$(ADDRESS)" - - "--v={{ .LogLevel }}" - - "--timeout=150s" - - "--leader-election" - - "--leader-election-namespace={{ .Namespace }}" - - "--handle-volume-inuse-error=false" - env: - - name: ADDRESS - value: unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: 
["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: csi-provisioner - image: {{ .ProvisionerImage }} - args: - - "--csi-address=$(ADDRESS)" - - "--v={{ .LogLevel }}" - - "--timeout=150s" - - "--retry-interval-start=500ms" - - "--leader-election=true" - - "--leader-election-namespace={{ .Namespace }}" - env: - - name: ADDRESS - value: unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: csi-cephfsplugin - image: {{ .CSIPluginImage }} - args: - - "--nodeid=$(NODE_ID)" - - "--type=cephfs" - - "--endpoint=$(CSI_ENDPOINT)" - - "--v={{ .LogLevel }}" - - "--controllerserver=true" - - "--drivername={{ .DriverNamePrefix }}cephfs.csi.ceph.com" - - "--pidlimit=-1" - - "--metricsport={{ .CephFSGRPCMetricsPort }}" - - "--forcecephkernelclient={{ .ForceCephFSKernelClient }}" - - "--metricspath=/metrics" - - "--enablegrpcmetrics={{ .EnableCSIGRPCMetrics }}" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NODE_ID - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CSI_ENDPOINT - value: unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: host-sys - mountPath: /sys - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: host-dev - mountPath: /dev - - name: ceph-csi-config - mountPath: /etc/ceph-csi-config/ - - name: keys-tmp-dir - mountPath: /tmp/csi/keys - - name: liveness-prometheus - image: {{ .CSIPluginImage }} - args: - - "--type=liveness" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metricsport={{ .CephFSLivenessMetricsPort }}" - - "--metricspath=/metrics" - - "--polltime=60s" - - "--timeout=3s" - env: - - name: CSI_ENDPOINT - value: unix:///csi/csi-provisioner.sock - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - volumeMounts: - - name: socket-dir - mountPath: /csi - imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumes: - - name: socket-dir - emptyDir: { - medium: "Memory" - } - - name: host-sys - hostPath: - path: /sys - - name: lib-modules - hostPath: - path: /lib/modules - - name: host-dev - hostPath: - path: /dev - - name: ceph-csi-config - configMap: - name: rook-ceph-csi-config - items: - - key: csi-cluster-config-json - path: config.json - - name: keys-tmp-dir - emptyDir: { - medium: "Memory" - } diff --git a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-svc.yaml b/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-svc.yaml deleted file mode 100644 index 2eb4b036c..000000000 --- a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-svc.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# This is a service to expose the liveness and grpc metrics -apiVersion: v1 -kind: Service -metadata: - name: csi-cephfsplugin-metrics - labels: - app: csi-metrics -spec: - ports: - - name: csi-http-metrics - port: 8080 - protocol: TCP - targetPort: {{ .CephFSLivenessMetricsPort }} - - name: csi-grpc-metrics - port: 8081 - protocol: TCP - targetPort: {{ .CephFSGRPCMetricsPort }} 
- selector: - contains: csi-cephfsplugin-metrics diff --git a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin.yaml b/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin.yaml deleted file mode 100644 index 31251daae..000000000 --- a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin.yaml +++ /dev/null @@ -1,168 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: csi-cephfsplugin - namespace: {{ .Namespace }} -spec: - selector: - matchLabels: - app: csi-cephfsplugin - updateStrategy: - type: {{ .CephFSPluginUpdateStrategy }} - template: - metadata: - labels: - app: csi-cephfsplugin - contains: csi-cephfsplugin-metrics - {{ range $key, $value := .CSICephFSPodLabels }} - {{ $key }}: "{{ $value }}" - {{ end }} - spec: - serviceAccountName: rook-csi-cephfs-plugin-sa - hostNetwork: {{ .EnableCSIHostNetwork }} - {{ if .PluginPriorityClassName }} - priorityClassName: {{ .PluginPriorityClassName }} - {{ end }} - # to use e.g. Rook orchestrated cluster, and mons' FQDN is - # resolved through k8s service, set dns policy to cluster first - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: driver-registrar - # This is necessary only for systems with SELinux, where - # non-privileged sidecar containers cannot access unix domain socket - # created by privileged CSI driver container. - securityContext: - privileged: true - image: {{ .RegistrarImage }} - args: - - "--v={{ .LogLevel }}" - - "--csi-address=/csi/csi.sock" - - "--kubelet-registration-path={{ .KubeletDirPath }}/plugins/{{ .DriverNamePrefix }}cephfs.csi.ceph.com/csi.sock" - env: - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: csi-cephfsplugin - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - image: {{ .CSIPluginImage }} - args: - - "--nodeid=$(NODE_ID)" - - "--type=cephfs" - - "--endpoint=$(CSI_ENDPOINT)" - - "--v={{ .LogLevel }}" - - "--nodeserver=true" - - "--drivername={{ .DriverNamePrefix }}cephfs.csi.ceph.com" - - "--pidlimit=-1" - - "--metricsport={{ .CephFSGRPCMetricsPort }}" - - "--forcecephkernelclient={{ .ForceCephFSKernelClient }}" - - "--metricspath=/metrics" - - "--enablegrpcmetrics={{ .EnableCSIGRPCMetrics }}" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NODE_ID - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: csi-plugins-dir - mountPath: "{{ .KubeletDirPath }}/plugins" - mountPropagation: "Bidirectional" - - name: pods-mount-dir - mountPath: "{{ .KubeletDirPath }}/pods" - mountPropagation: "Bidirectional" - - name: host-sys - mountPath: /sys - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: host-dev - mountPath: /dev - - name: ceph-csi-config - mountPath: /etc/ceph-csi-config/ - - name: keys-tmp-dir - mountPath: /tmp/csi/keys - - name: host-run-mount - mountPath: /run/mount - - name: liveness-prometheus - securityContext: - privileged: true - image: {{ .CSIPluginImage }} - args: - - "--type=liveness" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metricsport={{ .CephFSLivenessMetricsPort }}" - - "--metricspath=/metrics" - - "--polltime=60s" - - "--timeout=3s" 
- env: - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - volumeMounts: - - name: plugin-dir - mountPath: /csi - imagePullPolicy: "IfNotPresent" - volumes: - - name: plugin-dir - hostPath: - path: "{{ .KubeletDirPath }}/plugins/{{ .DriverNamePrefix }}cephfs.csi.ceph.com/" - type: DirectoryOrCreate - - name: csi-plugins-dir - hostPath: - path: "{{ .KubeletDirPath }}/plugins" - type: Directory - - name: registration-dir - hostPath: - path: "{{ .KubeletDirPath }}/plugins_registry/" - type: Directory - - name: pods-mount-dir - hostPath: - path: "{{ .KubeletDirPath }}/pods" - type: Directory - - name: host-sys - hostPath: - path: /sys - - name: lib-modules - hostPath: - path: /lib/modules - - name: host-dev - hostPath: - path: /dev - - name: ceph-csi-config - configMap: - name: rook-ceph-csi-config - items: - - key: csi-cluster-config-json - path: config.json - - name: keys-tmp-dir - emptyDir: { - medium: "Memory" - } - - name: host-run-mount - hostPath: - path: /run/mount diff --git a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml b/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml deleted file mode 100644 index 03b962a3f..000000000 --- a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml +++ /dev/null @@ -1,216 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: csi-rbdplugin-provisioner - namespace: {{ .Namespace }} -spec: - replicas: {{ .ProvisionerReplicas }} - selector: - matchLabels: - app: csi-rbdplugin-provisioner - template: - metadata: - labels: - app: csi-rbdplugin-provisioner - contains: csi-rbdplugin-metrics - {{ range $key, $value := .CSIRBDPodLabels }} - {{ $key }}: "{{ $value }}" - {{ end }} - spec: - serviceAccountName: rook-csi-rbd-provisioner-sa - {{ if .ProvisionerPriorityClassName }} - priorityClassName: {{ .ProvisionerPriorityClassName }} - {{ end }} - containers: - - name: csi-provisioner - image: {{ .ProvisionerImage }} - args: - - "--csi-address=$(ADDRESS)" - - "--v={{ .LogLevel }}" - - "--timeout=150s" - - "--retry-interval-start=500ms" - - "--leader-election=true" - - "--leader-election-namespace={{ .Namespace }}" - - "--default-fstype=ext4" - - "--extra-create-metadata=true" - env: - - name: ADDRESS - value: unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: csi-resizer - image: {{ .ResizerImage }} - args: - - "--csi-address=$(ADDRESS)" - - "--v={{ .LogLevel }}" - - "--timeout=150s" - - "--leader-election" - - "--leader-election-namespace={{ .Namespace }}" - - "--handle-volume-inuse-error=false" - env: - - name: ADDRESS - value: unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: csi-attacher - image: {{ .AttacherImage }} - args: - - "--v={{ .LogLevel }}" - - "--timeout=150s" - - "--csi-address=$(ADDRESS)" - - "--leader-election=true" - - "--leader-election-namespace={{ .Namespace }}" - env: - - name: ADDRESS - value: /csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /csi - {{ if .EnableRBDSnapshotter }} - - name: csi-snapshotter - image: {{ .SnapshotterImage }} - args: - - "--csi-address=$(ADDRESS)" - - "--v={{ .LogLevel }}" - - "--timeout=150s" - - "--leader-election=true" - - "--leader-election-namespace={{ .Namespace }}" - env: - - name: ADDRESS - value: 
unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /csi - {{ end }} - {{ if .EnableOMAPGenerator }} - - name: csi-omap-generator - image: {{ .CSIPluginImage }} - args : - - "--type=controller" - - "--drivernamespace=$(DRIVER_NAMESPACE)" - - "--v={{ .LogLevel }}" - - "--drivername={{ .DriverNamePrefix }}rbd.csi.ceph.com" - env: - - name: DRIVER_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: ceph-csi-config - mountPath: /etc/ceph-csi-config/ - - name: keys-tmp-dir - mountPath: /tmp/csi/keys - {{ end }} - {{ if .EnableVolumeReplicationSideCar }} - - name: volume-replication - image: {{ .VolumeReplicationImage }} - args : - - "--metrics-bind-address=0" - - "--leader-election-namespace={{ .Namespace }}" - - "--driver-name={{ .DriverNamePrefix }}rbd.csi.ceph.com" - - "--csi-address=$(ADDRESS)" - - "--rpc-timeout=150s" - - "--health-probe-bind-address=:9998" - - "--leader-elect=true" - env: - - name: ADDRESS - value: unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /csi - {{ end }} - - name: csi-rbdplugin - image: {{ .CSIPluginImage }} - args : - - "--nodeid=$(NODE_ID)" - - "--endpoint=$(CSI_ENDPOINT)" - - "--v={{ .LogLevel }}" - - "--type=rbd" - - "--controllerserver=true" - - "--drivername={{ .DriverNamePrefix }}rbd.csi.ceph.com" - - "--pidlimit=-1" - - "--metricsport={{ .RBDGRPCMetricsPort }}" - - "--metricspath=/metrics" - - "--enablegrpcmetrics={{ .EnableCSIGRPCMetrics }}" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NODE_ID - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CSI_ENDPOINT - value: unix:///csi/csi-provisioner.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /csi - - mountPath: /dev - name: host-dev - - mountPath: /sys - name: host-sys - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - name: ceph-csi-config - mountPath: /etc/ceph-csi-config/ - - name: keys-tmp-dir - mountPath: /tmp/csi/keys - - name: liveness-prometheus - image: {{ .CSIPluginImage }} - args: - - "--type=liveness" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metricsport={{ .RBDLivenessMetricsPort }}" - - "--metricspath=/metrics" - - "--polltime=60s" - - "--timeout=3s" - env: - - name: CSI_ENDPOINT - value: unix:///csi/csi-provisioner.sock - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - volumeMounts: - - name: socket-dir - mountPath: /csi - imagePullPolicy: "IfNotPresent" - volumes: - - name: host-dev - hostPath: - path: /dev - - name: host-sys - hostPath: - path: /sys - - name: lib-modules - hostPath: - path: /lib/modules - - name: socket-dir - emptyDir: { - medium: "Memory" - } - - name: ceph-csi-config - configMap: - name: rook-ceph-csi-config - items: - - key: csi-cluster-config-json - path: config.json - - name: keys-tmp-dir - emptyDir: { - medium: "Memory" - } diff --git a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-svc.yaml b/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-svc.yaml deleted file mode 100644 index 6c432669c..000000000 --- a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-svc.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# This is a service to expose the liveness and grpc metrics -apiVersion: v1 -kind: Service -metadata: - name: 
csi-rbdplugin-metrics - labels: - app: csi-metrics -spec: - ports: - - name: csi-http-metrics - port: 8080 - protocol: TCP - targetPort: {{ .RBDLivenessMetricsPort }} - - name: csi-grpc-metrics - port: 8081 - protocol: TCP - targetPort: {{ .RBDGRPCMetricsPort }} - selector: - contains: csi-rbdplugin-metrics diff --git a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin.yaml b/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin.yaml deleted file mode 100644 index 4ac97ff7b..000000000 --- a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin.yaml +++ /dev/null @@ -1,169 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: csi-rbdplugin - namespace: {{ .Namespace }} -spec: - selector: - matchLabels: - app: csi-rbdplugin - updateStrategy: - type: {{ .RBDPluginUpdateStrategy }} - template: - metadata: - labels: - app: csi-rbdplugin - contains: csi-rbdplugin-metrics - {{ range $key, $value := .CSIRBDPodLabels }} - {{ $key }}: "{{ $value }}" - {{ end }} - spec: - serviceAccountName: rook-csi-rbd-plugin-sa - {{ if .PluginPriorityClassName }} - priorityClassName: {{ .PluginPriorityClassName }} - {{ end }} - hostNetwork: {{ .EnableCSIHostNetwork }} - hostPID: true - # to use e.g. Rook orchestrated cluster, and mons' FQDN is - # resolved through k8s service, set dns policy to cluster first - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: driver-registrar - # This is necessary only for systems with SELinux, where - # non-privileged sidecar containers cannot access unix domain socket - # created by privileged CSI driver container. - securityContext: - privileged: true - image: {{ .RegistrarImage }} - args: - - "--v={{ .LogLevel }}" - - "--csi-address=/csi/csi.sock" - - "--kubelet-registration-path={{ .KubeletDirPath }}/plugins/{{ .DriverNamePrefix }}rbd.csi.ceph.com/csi.sock" - env: - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: csi-rbdplugin - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - image: {{ .CSIPluginImage }} - args : - - "--nodeid=$(NODE_ID)" - - "--endpoint=$(CSI_ENDPOINT)" - - "--v={{ .LogLevel }}" - - "--type=rbd" - - "--nodeserver=true" - - "--drivername={{ .DriverNamePrefix }}rbd.csi.ceph.com" - - "--pidlimit=-1" - - "--metricsport={{ .RBDGRPCMetricsPort }}" - - "--metricspath=/metrics" - - "--enablegrpcmetrics={{ .EnableCSIGRPCMetrics }}" - - "--stagingpath={{ .KubeletDirPath }}/plugins/kubernetes.io/csi/pv/" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NODE_ID - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: "{{ .KubeletDirPath }}/pods" - mountPropagation: "Bidirectional" - - name: plugin-mount-dir - mountPath: "{{ .KubeletDirPath }}/plugins" - mountPropagation: "Bidirectional" - - mountPath: /dev - name: host-dev - - mountPath: /sys - name: host-sys - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - name: ceph-csi-config - mountPath: /etc/ceph-csi-config/ - - name: keys-tmp-dir - mountPath: /tmp/csi/keys - - name: host-run-mount - mountPath: /run/mount - - name: liveness-prometheus - securityContext: 
- privileged: true - image: {{ .CSIPluginImage }} - args: - - "--type=liveness" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metricsport={{ .RBDLivenessMetricsPort }}" - - "--metricspath=/metrics" - - "--polltime=60s" - - "--timeout=3s" - env: - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - volumeMounts: - - name: plugin-dir - mountPath: /csi - imagePullPolicy: "IfNotPresent" - volumes: - - name: plugin-dir - hostPath: - path: "{{ .KubeletDirPath }}/plugins/{{ .DriverNamePrefix }}rbd.csi.ceph.com" - type: DirectoryOrCreate - - name: plugin-mount-dir - hostPath: - path: "{{ .KubeletDirPath }}/plugins" - type: Directory - - name: registration-dir - hostPath: - path: "{{ .KubeletDirPath }}/plugins_registry/" - type: Directory - - name: pods-mount-dir - hostPath: - path: "{{ .KubeletDirPath }}/pods" - type: Directory - - name: host-dev - hostPath: - path: /dev - - name: host-sys - hostPath: - path: /sys - - name: lib-modules - hostPath: - path: /lib/modules - - name: ceph-csi-config - configMap: - name: rook-ceph-csi-config - items: - - key: csi-cluster-config-json - path: config.json - - name: keys-tmp-dir - emptyDir: { - medium: "Memory" - } - - name: host-run-mount - hostPath: - path: /run/mount diff --git a/cluster/examples/kubernetes/ceph/dashboard-external-http.yaml b/cluster/examples/kubernetes/ceph/dashboard-external-http.yaml deleted file mode 100644 index 27fc59866..000000000 --- a/cluster/examples/kubernetes/ceph/dashboard-external-http.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: rook-ceph-mgr-dashboard-external-http - namespace: rook-ceph # namespace:cluster - labels: - app: rook-ceph-mgr - rook_cluster: rook-ceph # namespace:cluster -spec: - ports: - - name: dashboard - port: 7000 - protocol: TCP - targetPort: 7000 - selector: - app: rook-ceph-mgr - rook_cluster: rook-ceph - sessionAffinity: None - type: NodePort diff --git a/cluster/examples/kubernetes/ceph/dashboard-external-https.yaml b/cluster/examples/kubernetes/ceph/dashboard-external-https.yaml deleted file mode 100644 index 9d30fadca..000000000 --- a/cluster/examples/kubernetes/ceph/dashboard-external-https.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: rook-ceph-mgr-dashboard-external-https - namespace: rook-ceph # namespace:cluster - labels: - app: rook-ceph-mgr - rook_cluster: rook-ceph # namespace:cluster -spec: - ports: - - name: dashboard - port: 8443 - protocol: TCP - targetPort: 8443 - selector: - app: rook-ceph-mgr - rook_cluster: rook-ceph - sessionAffinity: None - type: NodePort diff --git a/cluster/examples/kubernetes/ceph/dashboard-ingress-https.yaml b/cluster/examples/kubernetes/ceph/dashboard-ingress-https.yaml deleted file mode 100644 index e3cfa3639..000000000 --- a/cluster/examples/kubernetes/ceph/dashboard-ingress-https.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# -# This example is for Kubernetes running an ngnix-ingress -# and an ACME (e.g. 
Let's Encrypt) certificate service -# -# The nginx-ingress annotations support the dashboard -# running using HTTPS with a self-signed certificate -# -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: rook-ceph-mgr-dashboard - namespace: rook-ceph # namespace:cluster - annotations: - kubernetes.io/ingress.class: "nginx" - kubernetes.io/tls-acme: "true" - nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - nginx.ingress.kubernetes.io/server-snippet: | - proxy_ssl_verify off; -spec: - tls: - - hosts: - - rook-ceph.example.com - secretName: rook-ceph.example.com - rules: - - host: rook-ceph.example.com - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: rook-ceph-mgr-dashboard - port: - name: https-dashboard diff --git a/cluster/examples/kubernetes/ceph/dashboard-loadbalancer.yaml b/cluster/examples/kubernetes/ceph/dashboard-loadbalancer.yaml deleted file mode 100644 index 98c0b9075..000000000 --- a/cluster/examples/kubernetes/ceph/dashboard-loadbalancer.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: rook-ceph-mgr-dashboard-loadbalancer - namespace: rook-ceph # namespace:cluster - labels: - app: rook-ceph-mgr - rook_cluster: rook-ceph # namespace:cluster -spec: - ports: - - name: dashboard - port: 8443 - protocol: TCP - targetPort: 8443 - selector: - app: rook-ceph-mgr - rook_cluster: rook-ceph - sessionAffinity: None - type: LoadBalancer diff --git a/cluster/examples/kubernetes/ceph/direct-mount.yaml b/cluster/examples/kubernetes/ceph/direct-mount.yaml deleted file mode 100644 index 54464470a..000000000 --- a/cluster/examples/kubernetes/ceph/direct-mount.yaml +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-direct-mount - namespace: rook-ceph # namespace:cluster - labels: - app: rook-direct-mount -spec: - replicas: 1 - selector: - matchLabels: - app: rook-direct-mount - template: - metadata: - labels: - app: rook-direct-mount - spec: - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: rook-direct-mount - image: rook/ceph:v1.7.2 - command: ["/tini"] - args: ["-g", "--", "/usr/local/bin/toolbox.sh"] - imagePullPolicy: IfNotPresent - env: - - name: ROOK_CEPH_USERNAME - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-username - - name: ROOK_CEPH_SECRET - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-secret - securityContext: - privileged: true - volumeMounts: - - mountPath: /dev - name: dev - - mountPath: /sys/bus - name: sysbus - - mountPath: /lib/modules - name: libmodules - - name: mon-endpoint-volume - mountPath: /etc/rook - # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021 - hostNetwork: true - volumes: - - name: dev - hostPath: - path: /dev - - name: sysbus - hostPath: - path: /sys/bus - - name: libmodules - hostPath: - path: /lib/modules - - name: mon-endpoint-volume - configMap: - name: rook-ceph-mon-endpoints - items: - - key: data - path: mon-endpoints diff --git a/cluster/examples/kubernetes/ceph/filesystem-ec.yaml b/cluster/examples/kubernetes/ceph/filesystem-ec.yaml deleted file mode 100644 index 205adfd13..000000000 --- a/cluster/examples/kubernetes/ceph/filesystem-ec.yaml +++ /dev/null @@ -1,88 +0,0 @@ -################################################################################################################# -# Create a filesystem with settings for erasure-coding instead of replication. 
A minimum of three nodes with OSDs -# are required in this example since the default failureDomain is host. -# kubectl create -f filesystem-ec.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: myfs-ec - namespace: rook-ceph # namespace:cluster -spec: - # The metadata pool spec - metadataPool: - replicated: - # You need at least three OSDs on different nodes for this config to work - size: 3 - # The list of data pool specs - dataPools: - # You need at least three `bluestore` OSDs on different nodes for this config to work - - erasureCoded: - dataChunks: 2 - codingChunks: 1 - # Inline compression mode for the data pool - parameters: - compression_mode: none - # Whether to preserve filesystem after CephFilesystem CRD deletion - preserveFilesystemOnDelete: true - # The metadata service (mds) configuration - metadataServer: - # The number of active MDS instances - activeCount: 1 - # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. - # If false, standbys will be available, but will not have a warm cache. - activeStandby: true - # The affinity rules to apply to the mds deployment - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - mds-node - # topologySpreadConstraints: - # tolerations: - # - key: mds-node - # operator: Exists - # podAffinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-mds - # topologyKey: kubernetes.io/hostname will place MDS across different hosts - topologyKey: kubernetes.io/hostname - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-mds - # topologyKey: */zone can be used to spread MDS across different AZ - # Use in k8s cluster if your cluster is v1.16 or lower - # Use in k8s cluster is v1.17 or upper - topologyKey: topology.kubernetes.io/zone - # A key/value list of annotations - annotations: - # key: value - # A key/value list of labels - labels: - # key: value - resources: - # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # priorityClassName: my-priority-class diff --git a/cluster/examples/kubernetes/ceph/filesystem-mirror.yaml b/cluster/examples/kubernetes/ceph/filesystem-mirror.yaml deleted file mode 100644 index 85ab0c6c3..000000000 --- a/cluster/examples/kubernetes/ceph/filesystem-mirror.yaml +++ /dev/null @@ -1,38 +0,0 @@ -################################################################################################################# -# Create filesystem-mirror daemon -# kubectl create -f filesystem-mirror.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephFilesystemMirror -metadata: - name: my-fs-mirror - namespace: rook-ceph # namespace:cluster -spec: - # The affinity rules to apply to the mds deployment - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - 
matchExpressions: - # - key: role - # operator: In - # values: - # - cephfs-mirror - # tolerations: - # - key: cephfs-mirror-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # A key/value list of annotations - annotations: - # key: value - resources: - # The requests and limits, for example to allow the cephfs-mirror pod(s) to use half of one CPU core and 1 gigabyte of memory - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # priorityClassName: my-priority-class diff --git a/cluster/examples/kubernetes/ceph/filesystem-test.yaml b/cluster/examples/kubernetes/ceph/filesystem-test.yaml deleted file mode 100644 index c001f7531..000000000 --- a/cluster/examples/kubernetes/ceph/filesystem-test.yaml +++ /dev/null @@ -1,24 +0,0 @@ -################################################################################################################# -# Create a filesystem with settings for a test environment where only a single OSD is required. -# kubectl create -f filesystem-test.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: myfs - namespace: rook-ceph # namespace:cluster -spec: - metadataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - dataPools: - - failureDomain: osd - replicated: - size: 1 - requireSafeReplicaSize: false - preserveFilesystemOnDelete: false - metadataServer: - activeCount: 1 - activeStandby: true diff --git a/cluster/examples/kubernetes/ceph/filesystem.yaml b/cluster/examples/kubernetes/ceph/filesystem.yaml deleted file mode 100644 index eedd7181d..000000000 --- a/cluster/examples/kubernetes/ceph/filesystem.yaml +++ /dev/null @@ -1,122 +0,0 @@ -################################################################################################################# -# Create a filesystem with settings with replication enabled for a production environment. -# A minimum of 3 OSDs on different nodes are required in this example. -# kubectl create -f filesystem.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: myfs - namespace: rook-ceph # namespace:cluster -spec: - # The metadata pool spec. Must use replication. - metadataPool: - replicated: - size: 3 - requireSafeReplicaSize: true - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: - none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - # The list of data pool specs. Can use replication or erasure coding. - dataPools: - - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. 
- # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: - none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - # Whether to preserve filesystem after CephFilesystem CRD deletion - preserveFilesystemOnDelete: true - # The metadata service (mds) configuration - metadataServer: - # The number of active MDS instances - activeCount: 1 - # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. - # If false, standbys will be available, but will not have a warm cache. - activeStandby: true - # The affinity rules to apply to the mds deployment - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - mds-node - # topologySpreadConstraints: - # tolerations: - # - key: mds-node - # operator: Exists - # podAffinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-mds - # topologyKey: kubernetes.io/hostname will place MDS across different hosts - topologyKey: kubernetes.io/hostname - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-mds - # topologyKey: */zone can be used to spread MDS across different AZ - # Use in k8s cluster if your cluster is v1.16 or lower - # Use in k8s cluster is v1.17 or upper - topologyKey: topology.kubernetes.io/zone - # A key/value list of annotations - annotations: - # key: value - # A key/value list of labels - labels: - # key: value - resources: - # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # priorityClassName: my-priority-class - # Filesystem mirroring settings - # mirroring: - # enabled: true - # list of Kubernetes Secrets containing the peer token - # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers - # peers: - #secretNames: - #- secondary-cluster-peer - # specify the schedule(s) on which snapshots should be taken - # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules - # snapshotSchedules: - # - path: / - # interval: 24h # daily snapshots - # startTime: 11:55 - # manage retention policies - # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies - # snapshotRetention: - # - path: / - # duration: "h 24" diff --git a/cluster/examples/kubernetes/ceph/flex/kube-registry.yaml b/cluster/examples/kubernetes/ceph/flex/kube-registry.yaml deleted file mode 100644 index 344fefe73..000000000 --- a/cluster/examples/kubernetes/ceph/flex/kube-registry.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kube-registry - namespace: kube-system - labels: - k8s-app: 
kube-registry - kubernetes.io/cluster-service: "true" -spec: - replicas: 3 - selector: - matchLabels: - k8s-app: kube-registry - template: - metadata: - labels: - k8s-app: kube-registry - kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - imagePullPolicy: Always - resources: - limits: - cpu: 100m - memory: 100Mi - env: - # Configuration reference: https://docs.docker.com/registry/configuration/ - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_HTTP_SECRET - value: "Ple4seCh4ngeThisN0tAVerySecretV4lue" - - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY - value: /var/lib/registry - volumeMounts: - - name: image-store - mountPath: /var/lib/registry - ports: - - containerPort: 5000 - name: registry - protocol: TCP - livenessProbe: - httpGet: - path: / - port: registry - readinessProbe: - httpGet: - path: / - port: registry - volumes: - - name: image-store - flexVolume: - driver: ceph.rook.io/rook - fsType: ceph - options: - fsName: myfs # name of the filesystem specified in the filesystem CRD. - clusterNamespace: rook-ceph # namespace where the Rook cluster is deployed - # by default the path is /, but you can override and mount a specific path of the filesystem by using the path attribute - # the path must exist on the filesystem, otherwise mounting the filesystem at that path will fail - # path: /some/path/inside/cephfs - # (Optional) Specify an existing Ceph user that will be used for mounting storage with this StorageClass. - #mountUser: user1 - # (Optional) Specify an existing Kubernetes secret name containing just one key holding the Ceph user secret. - # The secret must exist in each namespace(s) where the storage will be consumed. - #mountSecret: ceph-user1-secret diff --git a/cluster/examples/kubernetes/ceph/flex/storageclass-ec.yaml b/cluster/examples/kubernetes/ceph/flex/storageclass-ec.yaml deleted file mode 100644 index 7678aa95b..000000000 --- a/cluster/examples/kubernetes/ceph/flex/storageclass-ec.yaml +++ /dev/null @@ -1,48 +0,0 @@ -################################################################################################################# -# Create a storage class with a data pool that uses erasure coding for a production environment. -# A metadata pool is created with replication enabled. A minimum of 3 nodes with OSDs are required in this -# example since the default failureDomain is host. -# kubectl create -f storageclass-ec.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicated-metadata-pool - namespace: rook-ceph -spec: - replicated: - size: 2 ---- -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: ec-data-pool - namespace: rook-ceph -spec: - # Make sure you have enough nodes and OSDs running bluestore to support the replica size or erasure code chunks. - # For the below settings, you need at least 3 OSDs on different nodes (because the `failureDomain` is `host` by default). - erasureCoded: - dataChunks: 2 - codingChunks: 1 ---- -# The nodes that are going to mount the erasure coded RBD block storage must have Linux kernel >= `4.11`. 
-apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -provisioner: ceph.rook.io/block -# Works for Kubernetes 1.14+ -allowVolumeExpansion: true -parameters: - # If you want to use erasure coded pool with RBD, you need to create two pools (as seen above): one erasure coded and one replicated. - # You need to specify the replicated pool here in the `blockPool` parameter, it is used for the metadata of the images. - # The erasure coded pool must be set as the `dataBlockPool` parameter below. - blockPool: replicated-metadata-pool - dataBlockPool: ec-data-pool - # Specify the namespace of the rook cluster from which to create volumes. - # If not specified, it will use `rook` as the default namespace of the cluster. - # This is also the namespace where the cluster will be - clusterNamespace: rook-ceph - # Specify the filesystem type of the volume. If not specified, it will use `ext4`. - fstype: ext4 diff --git a/cluster/examples/kubernetes/ceph/flex/storageclass-test.yaml b/cluster/examples/kubernetes/ceph/flex/storageclass-test.yaml deleted file mode 100644 index a1a4a9d2b..000000000 --- a/cluster/examples/kubernetes/ceph/flex/storageclass-test.yaml +++ /dev/null @@ -1,25 +0,0 @@ -################################################################################################################# -# Create a storage class with a pool for a test environment. Only a single OSD is required in this example. -# kubectl create -f storageclass-test.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - replicated: - size: 1 ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -provisioner: ceph.rook.io/block -# Works for Kubernetes 1.14+ -allowVolumeExpansion: true -parameters: - blockPool: replicapool - clusterNamespace: rook-ceph - fstype: ext4 diff --git a/cluster/examples/kubernetes/ceph/flex/storageclass.yaml b/cluster/examples/kubernetes/ceph/flex/storageclass.yaml deleted file mode 100644 index d27113d52..000000000 --- a/cluster/examples/kubernetes/ceph/flex/storageclass.yaml +++ /dev/null @@ -1,35 +0,0 @@ -################################################################################################################# -# Create a storage class with a pool that sets replication for a production environment. -# A minimum of 3 nodes with OSDs are required in this example since the default failureDomain is host. -# kubectl create -f storageclass.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - replicated: - size: 3 ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -provisioner: ceph.rook.io/block -# Works for Kubernetes 1.14+ -allowVolumeExpansion: true -parameters: - blockPool: replicapool - # Specify the namespace of the rook cluster from which to create volumes. - # If not specified, it will use `rook` as the default namespace of the cluster. - # This is also the namespace where the cluster will be - clusterNamespace: rook-ceph - # Specify the filesystem type of the volume. If not specified, it will use `ext4`. 
- fstype: ext4 - # (Optional) Specify an existing Ceph user that will be used for mounting storage with this StorageClass. - #mountUser: user1 - # (Optional) Specify an existing Kubernetes secret name containing just one key holding the Ceph user secret. - # The secret must exist in each namespace(s) where the storage will be consumed. - #mountSecret: ceph-user1-secret diff --git a/cluster/examples/kubernetes/ceph/import-external-cluster.sh b/cluster/examples/kubernetes/ceph/import-external-cluster.sh deleted file mode 100644 index ea0f438be..000000000 --- a/cluster/examples/kubernetes/ceph/import-external-cluster.sh +++ /dev/null @@ -1,165 +0,0 @@ -#!/bin/bash -set -e - -############## -# VARIABLES # -############# -MON_SECRET_NAME=rook-ceph-mon -CSI_RBD_NODE_SECRET_NAME=rook-csi-rbd-node -CSI_RBD_PROVISIONER_SECRET_NAME=rook-csi-rbd-provisioner -CSI_CEPHFS_NODE_SECRET_NAME=rook-csi-cephfs-node -CSI_CEPHFS_PROVISIONER_SECRET_NAME=rook-csi-cephfs-provisioner -RGW_ADMIN_OPS_USER_SECRET_NAME=rgw-admin-ops-user -MON_SECRET_CLUSTER_NAME_KEYNAME=cluster-name -MON_SECRET_FSID_KEYNAME=fsid -MON_SECRET_ADMIN_KEYRING_KEYNAME=admin-secret -MON_SECRET_MON_KEYRING_KEYNAME=mon-secret -MON_SECRET_CEPH_USERNAME_KEYNAME=ceph-username -MON_SECRET_CEPH_SECRET_KEYNAME=ceph-secret -MON_ENDPOINT_CONFIGMAP_NAME=rook-ceph-mon-endpoints -ROOK_EXTERNAL_CLUSTER_NAME=$NAMESPACE -ROOK_EXTERNAL_MAX_MON_ID=2 -ROOK_EXTERNAL_MAPPING={} -ROOK_EXTERNAL_MONITOR_SECRET=mon-secret -: "${ROOK_EXTERNAL_ADMIN_SECRET:=admin-secret}" - -############# -# FUNCTIONS # -############# - -function checkEnvVars() { - if [ -z "$NAMESPACE" ]; then - echo "Please populate the environment variable NAMESPACE" - exit 1 - fi - if [ -z "$ROOK_EXTERNAL_FSID" ]; then - echo "Please populate the environment variable ROOK_EXTERNAL_FSID" - exit 1 - fi - if [ -z "$ROOK_EXTERNAL_CEPH_MON_DATA" ]; then - echo "Please populate the environment variable ROOK_EXTERNAL_CEPH_MON_DATA" - exit 1 - fi - if [[ "$ROOK_EXTERNAL_ADMIN_SECRET" == "admin-secret" ]]; then - if [ -z "$ROOK_EXTERNAL_USER_SECRET" ]; then - echo "Please populate the environment variable ROOK_EXTERNAL_USER_SECRET" - exit 1 - fi - if [ -z "$ROOK_EXTERNAL_USERNAME" ]; then - echo "Please populate the environment variable ROOK_EXTERNAL_USERNAME" - exit 1 - fi - if [ -z "$CSI_RBD_NODE_SECRET" ]; then - echo "Please populate the environment variable CSI_RBD_NODE_SECRET" - exit 1 - fi - if [ -z "$CSI_RBD_PROVISIONER_SECRET" ]; then - echo "Please populate the environment variable CSI_RBD_PROVISIONER_SECRET" - exit 1 - fi - if [ -z "$CSI_CEPHFS_NODE_SECRET" ]; then - echo "Please populate the environment variable CSI_CEPHFS_NODE_SECRET" - exit 1 - fi - if [ -z "$CSI_CEPHFS_PROVISIONER_SECRET" ]; then - echo "Please populate the environment variable CSI_CEPHFS_PROVISIONER_SECRET" - exit 1 - fi - fi - if [[ "$ROOK_EXTERNAL_ADMIN_SECRET" != "admin-secret" ]] && [ -n "$ROOK_EXTERNAL_USER_SECRET" ] ; then - echo "Providing both ROOK_EXTERNAL_ADMIN_SECRET and ROOK_EXTERNAL_USER_SECRET is not supported, choose one only." 
- exit 1 - fi -} - -function importSecret() { - kubectl -n "$NAMESPACE" \ - create \ - secret \ - generic \ - --type="kubernetes.io/rook" \ - "$MON_SECRET_NAME" \ - --from-literal="$MON_SECRET_CLUSTER_NAME_KEYNAME"="$ROOK_EXTERNAL_CLUSTER_NAME" \ - --from-literal="$MON_SECRET_FSID_KEYNAME"="$ROOK_EXTERNAL_FSID" \ - --from-literal="$MON_SECRET_ADMIN_KEYRING_KEYNAME"="$ROOK_EXTERNAL_ADMIN_SECRET" \ - --from-literal="$MON_SECRET_MON_KEYRING_KEYNAME"="$ROOK_EXTERNAL_MONITOR_SECRET" \ - --from-literal="$MON_SECRET_CEPH_USERNAME_KEYNAME"="$ROOK_EXTERNAL_USERNAME" \ - --from-literal="$MON_SECRET_CEPH_SECRET_KEYNAME"="$ROOK_EXTERNAL_USER_SECRET" -} - -function importConfigMap() { - kubectl -n "$NAMESPACE" \ - create \ - configmap \ - "$MON_ENDPOINT_CONFIGMAP_NAME" \ - --from-literal=data="$ROOK_EXTERNAL_CEPH_MON_DATA" \ - --from-literal=mapping="$ROOK_EXTERNAL_MAPPING" \ - --from-literal=maxMonId="$ROOK_EXTERNAL_MAX_MON_ID" -} - -function importCsiRBDNodeSecret() { - kubectl -n "$NAMESPACE" \ - create \ - secret \ - generic \ - --type="kubernetes.io/rook" \ - "$CSI_RBD_NODE_SECRET_NAME" \ - --from-literal=userID=csi-rbd-node \ - --from-literal=userKey="$CSI_RBD_NODE_SECRET" -} - -function importCsiRBDProvisionerSecret() { - kubectl -n "$NAMESPACE" \ - create \ - secret \ - generic \ - --type="kubernetes.io/rook" \ - "$CSI_RBD_PROVISIONER_SECRET_NAME" \ - --from-literal=userID=csi-rbd-provisioner \ - --from-literal=userKey="$CSI_RBD_PROVISIONER_SECRET" -} - -function importCsiCephFSNodeSecret() { - kubectl -n "$NAMESPACE" \ - create \ - secret \ - generic \ - --type="kubernetes.io/rook" \ - "$CSI_CEPHFS_NODE_SECRET_NAME" \ - --from-literal=adminID=csi-cephfs-node \ - --from-literal=adminKey="$CSI_CEPHFS_NODE_SECRET" -} - -function importCsiCephFSProvisionerSecret() { - kubectl -n "$NAMESPACE" \ - create \ - secret \ - generic \ - --type="kubernetes.io/rook" \ - "$CSI_CEPHFS_PROVISIONER_SECRET_NAME" \ - --from-literal=adminID=csi-cephfs-provisioner \ - --from-literal=adminKey="$CSI_CEPHFS_PROVISIONER_SECRET" -} - -function importRGWAdminOpsUser() { - kubectl -n "$NAMESPACE" \ - create \ - secret \ - generic \ - --type="kubernetes.io/rook" \ - "$RGW_ADMIN_OPS_USER_SECRET_NAME" \ - --from-literal=accessKey="$RGW_ADMIN_OPS_USER_ACCESS_KEY" \ - --from-literal=secretKey="$RGW_ADMIN_OPS_USER_SECRET_KEY" -} - -######## -# MAIN # -######## -checkEnvVars -importSecret -importConfigMap -importCsiRBDNodeSecret -importCsiRBDProvisionerSecret -importCsiCephFSNodeSecret -importCsiCephFSProvisionerSecret -importRGWAdminOpsUser diff --git a/cluster/examples/kubernetes/ceph/monitoring/csi-metrics-service-monitor.yaml b/cluster/examples/kubernetes/ceph/monitoring/csi-metrics-service-monitor.yaml deleted file mode 100644 index ba43fbd2c..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/csi-metrics-service-monitor.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: csi-metrics - namespace: rook-ceph - labels: - team: rook -spec: - namespaceSelector: - matchNames: - - rook-ceph - selector: - matchLabels: - app: csi-metrics - endpoints: - - port: csi-http-metrics - path: /metrics - interval: 5s - # comment csi-grpc-metrics realated information if csi grpc metrics is not enabled - - port: csi-grpc-metrics - path: /metrics - interval: 5s diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml deleted file mode 100644 
index cf8488efb..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - labels: - prometheus: rook-prometheus - role: alert-rules - name: prometheus-ceph-rules - namespace: rook-ceph -spec: - groups: - - name: persistent-volume-alert.rules - rules: - - alert: PersistentVolumeUsageNearFull - annotations: - description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed - 75%. Free up some space. - message: PVC {{ $labels.persistentvolumeclaim }} is nearing full. Data deletion - is required. - severity_level: warning - storage_type: ceph - expr: | - (kubelet_volume_stats_used_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) / (kubelet_volume_stats_capacity_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) > 0.75 - for: 5s - labels: - severity: warning - - alert: PersistentVolumeUsageCritical - annotations: - description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed - 85%. Free up some space immediately. - message: PVC {{ $labels.persistentvolumeclaim }} is critically full. Data - deletion is required. - severity_level: error - storage_type: ceph - expr: | - (kubelet_volume_stats_used_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) / (kubelet_volume_stats_capacity_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) > 0.85 - for: 5s - labels: - severity: critical diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml deleted file mode 100644 index 0538c572d..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml +++ /dev/null @@ -1,352 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - labels: - prometheus: rook-prometheus - role: alert-rules - name: prometheus-ceph-rules - namespace: rook-ceph -spec: - groups: - - name: ceph.rules - rules: - - expr: | - kube_node_status_condition{condition="Ready",job="kube-state-metrics",status="true"} * on (node) group_right() max(label_replace(ceph_disk_occupation{job="rook-ceph-mgr"},"node","$1","exported_instance","(.*)")) by (node) - record: cluster:ceph_node_down:join_kube - - expr: | - avg(topk by (ceph_daemon) (1, label_replace(label_replace(ceph_disk_occupation{job="rook-ceph-mgr"}, "instance", "$1", "exported_instance", "(.*)"), "device", "$1", "device", "/dev/(.*)")) * on(instance, device) group_right(ceph_daemon) topk by (instance,device) (1,(irate(node_disk_read_time_seconds_total[1m]) + irate(node_disk_write_time_seconds_total[1m]) / (clamp_min(irate(node_disk_reads_completed_total[1m]), 1) + 
irate(node_disk_writes_completed_total[1m]))))) - record: cluster:ceph_disk_latency:join_ceph_node_disk_irate1m - - name: telemeter.rules - rules: - - expr: | - count(ceph_osd_metadata{job="rook-ceph-mgr"}) - record: job:ceph_osd_metadata:count - - expr: | - count(kube_persistentvolume_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"}) - record: job:kube_pv:count - - expr: | - sum(ceph_pool_rd{job="rook-ceph-mgr"}+ ceph_pool_wr{job="rook-ceph-mgr"}) - record: job:ceph_pools_iops:total - - expr: | - sum(ceph_pool_rd_bytes{job="rook-ceph-mgr"}+ ceph_pool_wr_bytes{job="rook-ceph-mgr"}) - record: job:ceph_pools_iops_bytes:total - - expr: | - count(count(ceph_mon_metadata{job="rook-ceph-mgr"} or ceph_osd_metadata{job="rook-ceph-mgr"} or ceph_rgw_metadata{job="rook-ceph-mgr"} or ceph_mds_metadata{job="rook-ceph-mgr"} or ceph_mgr_metadata{job="rook-ceph-mgr"}) by(ceph_version)) - record: job:ceph_versions_running:count - - name: ceph-mgr-status - rules: - - alert: CephMgrIsAbsent - annotations: - description: Ceph Manager has disappeared from Prometheus target discovery. - message: Storage metrics collector service not available anymore. - severity_level: critical - storage_type: ceph - expr: | - absent(up{job="rook-ceph-mgr"} == 1) - for: 5m - labels: - severity: critical - - alert: CephMgrIsMissingReplicas - annotations: - description: Ceph Manager is missing replicas. - message: Storage metrics collector service doesn't have required no of replicas. - severity_level: warning - storage_type: ceph - expr: | - sum(up{job="rook-ceph-mgr"}) < 1 - for: 5m - labels: - severity: warning - - name: ceph-mds-status - rules: - - alert: CephMdsMissingReplicas - annotations: - description: Minimum required replicas for storage metadata service not available. - Might affect the working of storage cluster. - message: Insufficient replicas for storage metadata service. - severity_level: warning - storage_type: ceph - expr: | - sum(ceph_mds_metadata{job="rook-ceph-mgr"} == 1) < 2 - for: 5m - labels: - severity: warning - - name: quorum-alert.rules - rules: - - alert: CephMonQuorumAtRisk - annotations: - description: Storage cluster quorum is low. Contact Support. - message: Storage quorum at risk - severity_level: error - storage_type: ceph - expr: | - count(ceph_mon_quorum_status{job="rook-ceph-mgr"} == 1) <= ((count(ceph_mon_metadata{job="rook-ceph-mgr"}) % 2) + 1) - for: 15m - labels: - severity: critical - - alert: CephMonHighNumberOfLeaderChanges - annotations: - description: Ceph Monitor {{ $labels.ceph_daemon }} on host {{ $labels.hostname - }} has seen {{ $value | printf "%.2f" }} leader changes per minute recently. - message: Storage Cluster has seen many leader changes recently. - severity_level: warning - storage_type: ceph - expr: | - (ceph_mon_metadata{job="rook-ceph-mgr"} * on (ceph_daemon) group_left() (rate(ceph_mon_num_elections{job="rook-ceph-mgr"}[5m]) * 60)) > 0.95 - for: 5m - labels: - severity: warning - - name: ceph-node-alert.rules - rules: - - alert: CephNodeDown - annotations: - description: Storage node {{ $labels.node }} went down. Please check the node - immediately. 
- message: Storage node {{ $labels.node }} went down - severity_level: error - storage_type: ceph - expr: | - cluster:ceph_node_down:join_kube == 0 - for: 30s - labels: - severity: critical - - name: osd-alert.rules - rules: - - alert: CephOSDCriticallyFull - annotations: - description: Utilization of storage device {{ $labels.ceph_daemon }} of device_class - type {{$labels.device_class}} has crossed 80% on host {{ $labels.hostname - }}. Immediately free up some space or add capacity of type {{$labels.device_class}}. - message: Back-end storage device is critically full. - severity_level: error - storage_type: ceph - expr: | - (ceph_osd_metadata * on (ceph_daemon) group_right(device_class) (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes)) >= 0.80 - for: 40s - labels: - severity: critical - - alert: CephOSDFlapping - annotations: - description: Storage daemon {{ $labels.ceph_daemon }} has restarted 5 times - in last 5 minutes. Please check the pod events or ceph status to find out - the cause. - message: Ceph storage osd flapping. - severity_level: error - storage_type: ceph - expr: | - changes(ceph_osd_up[5m]) >= 10 - for: 0s - labels: - severity: critical - - alert: CephOSDNearFull - annotations: - description: Utilization of storage device {{ $labels.ceph_daemon }} of device_class - type {{$labels.device_class}} has crossed 75% on host {{ $labels.hostname - }}. Immediately free up some space or add capacity of type {{$labels.device_class}}. - message: Back-end storage device is nearing full. - severity_level: warning - storage_type: ceph - expr: | - (ceph_osd_metadata * on (ceph_daemon) group_right(device_class) (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes)) >= 0.75 - for: 40s - labels: - severity: warning - - alert: CephOSDDiskNotResponding - annotations: - description: Disk device {{ $labels.device }} not responding, on host {{ $labels.host - }}. - message: Disk not responding - severity_level: error - storage_type: ceph - expr: | - label_replace((ceph_osd_in == 1 and ceph_osd_up == 0),"disk","$1","ceph_daemon","osd.(.*)") + on(ceph_daemon) group_left(host, device) label_replace(ceph_disk_occupation,"host","$1","exported_instance","(.*)") - for: 1m - labels: - severity: critical - - alert: CephOSDDiskUnavailable - annotations: - description: Disk device {{ $labels.device }} not accessible on host {{ $labels.host - }}. - message: Disk not accessible - severity_level: error - storage_type: ceph - expr: | - label_replace((ceph_osd_in == 0 and ceph_osd_up == 0),"disk","$1","ceph_daemon","osd.(.*)") + on(ceph_daemon) group_left(host, device) label_replace(ceph_disk_occupation,"host","$1","exported_instance","(.*)") - for: 1m - labels: - severity: critical - - alert: CephOSDSlowOps - annotations: - description: '{{ $value }} Ceph OSD requests are taking too long to process. - Please check ceph status to find out the cause.' - message: OSD requests are taking too long to process. - severity_level: warning - storage_type: ceph - expr: | - ceph_healthcheck_slow_ops > 0 - for: 30s - labels: - severity: warning - - alert: CephDataRecoveryTakingTooLong - annotations: - description: Data recovery has been active for too long. Contact Support. - message: Data recovery is slow - severity_level: warning - storage_type: ceph - expr: | - ceph_pg_undersized > 0 - for: 2h - labels: - severity: warning - - alert: CephPGRepairTakingTooLong - annotations: - description: Self heal operations taking too long. Contact Support. 
- message: Self heal problems detected - severity_level: warning - storage_type: ceph - expr: | - ceph_pg_inconsistent > 0 - for: 1h - labels: - severity: warning - - name: persistent-volume-alert.rules - rules: - - alert: PersistentVolumeUsageNearFull - annotations: - description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed - 75%. Free up some space or expand the PVC. - message: PVC {{ $labels.persistentvolumeclaim }} is nearing full. Data deletion - or PVC expansion is required. - severity_level: warning - storage_type: ceph - expr: | - (kubelet_volume_stats_used_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) / (kubelet_volume_stats_capacity_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) > 0.75 - for: 5s - labels: - severity: warning - - alert: PersistentVolumeUsageCritical - annotations: - description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed - 85%. Free up some space or expand the PVC immediately. - message: PVC {{ $labels.persistentvolumeclaim }} is critically full. Data - deletion or PVC expansion is required. - severity_level: error - storage_type: ceph - expr: | - (kubelet_volume_stats_used_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) / (kubelet_volume_stats_capacity_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) > 0.85 - for: 5s - labels: - severity: critical - - name: cluster-state-alert.rules - rules: - - alert: CephClusterErrorState - annotations: - description: Storage cluster is in error state for more than 10m. - message: Storage cluster is in error state - severity_level: error - storage_type: ceph - expr: | - ceph_health_status{job="rook-ceph-mgr"} > 1 - for: 10m - labels: - severity: critical - - alert: CephClusterWarningState - annotations: - description: Storage cluster is in warning state for more than 10m. - message: Storage cluster is in degraded state - severity_level: warning - storage_type: ceph - expr: | - ceph_health_status{job="rook-ceph-mgr"} == 1 - for: 10m - labels: - severity: warning - - alert: CephOSDVersionMismatch - annotations: - description: There are {{ $value }} different versions of Ceph OSD components - running. - message: There are multiple versions of storage services running. - severity_level: warning - storage_type: ceph - expr: | - count(count(ceph_osd_metadata{job="rook-ceph-mgr"}) by (ceph_version)) > 1 - for: 10m - labels: - severity: warning - - alert: CephMonVersionMismatch - annotations: - description: There are {{ $value }} different versions of Ceph Mon components - running. - message: There are multiple versions of storage services running. 
- severity_level: warning - storage_type: ceph - expr: | - count(count(ceph_mon_metadata{job="rook-ceph-mgr"}) by (ceph_version)) > 1 - for: 10m - labels: - severity: warning - - name: cluster-utilization-alert.rules - rules: - - alert: CephClusterNearFull - annotations: - description: Storage cluster utilization has crossed 75% and will become read-only - at 85%. Free up some space or expand the storage cluster. - message: Storage cluster is nearing full. Data deletion or cluster expansion - is required. - severity_level: warning - storage_type: ceph - expr: | - ceph_cluster_total_used_raw_bytes / ceph_cluster_total_bytes > 0.75 - for: 5s - labels: - severity: warning - - alert: CephClusterCriticallyFull - annotations: - description: Storage cluster utilization has crossed 80% and will become read-only - at 85%. Free up some space or expand the storage cluster immediately. - message: Storage cluster is critically full and needs immediate data deletion - or cluster expansion. - severity_level: error - storage_type: ceph - expr: | - ceph_cluster_total_used_raw_bytes / ceph_cluster_total_bytes > 0.80 - for: 5s - labels: - severity: critical - - alert: CephClusterReadOnly - annotations: - description: Storage cluster utilization has crossed 85% and will become read-only - now. Free up some space or expand the storage cluster immediately. - message: Storage cluster is read-only now and needs immediate data deletion - or cluster expansion. - severity_level: error - storage_type: ceph - expr: | - ceph_cluster_total_used_raw_bytes / ceph_cluster_total_bytes >= 0.85 - for: 0s - labels: - severity: critical - - name: pool-quota.rules - rules: - - alert: CephPoolQuotaBytesNearExhaustion - annotations: - description: Storage pool {{ $labels.name }} quota usage has crossed 70%. - message: Storage pool quota(bytes) is near exhaustion. - severity_level: warning - storage_type: ceph - expr: | - (ceph_pool_stored_raw * on (pool_id) group_left(name)ceph_pool_metadata) / ((ceph_pool_quota_bytes * on (pool_id) group_left(name)ceph_pool_metadata) > 0) > 0.70 - for: 1m - labels: - severity: warning - - alert: CephPoolQuotaBytesCriticallyExhausted - annotations: - description: Storage pool {{ $labels.name }} quota usage has crossed 90%. - message: Storage pool quota(bytes) is critically exhausted. 
- severity_level: critical - storage_type: ceph - expr: | - (ceph_pool_stored_raw * on (pool_id) group_left(name)ceph_pool_metadata) / ((ceph_pool_quota_bytes * on (pool_id) group_left(name)ceph_pool_metadata) > 0) > 0.90 - for: 1m - labels: - severity: critical \ No newline at end of file diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules-external.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules-external.yaml deleted file mode 120000 index 73fd374bc..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules-external.yaml +++ /dev/null @@ -1 +0,0 @@ -prometheus-ceph-v14-rules-external.yaml \ No newline at end of file diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules.yaml deleted file mode 120000 index 5df99c1c0..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules.yaml +++ /dev/null @@ -1 +0,0 @@ -prometheus-ceph-v14-rules.yaml \ No newline at end of file diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules-external.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules-external.yaml deleted file mode 120000 index 73fd374bc..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules-external.yaml +++ /dev/null @@ -1 +0,0 @@ -prometheus-ceph-v14-rules-external.yaml \ No newline at end of file diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules.yaml deleted file mode 120000 index 5df99c1c0..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules.yaml +++ /dev/null @@ -1 +0,0 @@ -prometheus-ceph-v14-rules.yaml \ No newline at end of file diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-service.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-service.yaml deleted file mode 100644 index 4a8d35d27..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: rook-prometheus - namespace: rook-ceph -spec: - type: NodePort - ports: - - name: web - nodePort: 30900 - port: 9090 - protocol: TCP - targetPort: web - selector: - prometheus: rook-prometheus diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus.yaml deleted file mode 100644 index df25ad975..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus.yaml +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: prometheus - namespace: rook-ceph ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: prometheus -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-prometheus: "true" -rules: [] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: prometheus-rules - labels: - rbac.ceph.rook.io/aggregate-to-prometheus: "true" -rules: -- apiGroups: [""] - resources: - - nodes - - services - - endpoints - - pods - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: - - configmaps - verbs: ["get"] -- nonResourceURLs: ["/metrics"] - verbs: ["get"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: prometheus 
-roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: prometheus -subjects: -- kind: ServiceAccount - name: prometheus - namespace: rook-ceph ---- -apiVersion: monitoring.coreos.com/v1 -kind: Prometheus -metadata: - name: rook-prometheus - namespace: rook-ceph - labels: - prometheus: rook-prometheus -spec: - serviceAccountName: prometheus - serviceMonitorSelector: - matchLabels: - team: rook - ruleSelector: - matchLabels: - role: alert-rules - prometheus: rook-prometheus - resources: - requests: - memory: 400Mi diff --git a/cluster/examples/kubernetes/ceph/monitoring/rbac.yaml b/cluster/examples/kubernetes/ceph/monitoring/rbac.yaml deleted file mode 100644 index 97c86d4e7..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/rbac.yaml +++ /dev/null @@ -1,108 +0,0 @@ ---- -# OLM: BEGIN ROLE -# Aspects for creation of monitoring resources -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-monitor - namespace: rook-ceph -rules: -- apiGroups: - - monitoring.coreos.com - resources: - - '*' - verbs: - - '*' -# OLM: END ROLE ---- -# OLM: BEGIN ROLE BINDING -# Allow creation of monitoring resources -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-monitor - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-monitor -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph -# OLM: END ROLE BINDING ---- -# OLM: BEGIN ROLE -# Aspects for metrics collection -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-metrics - namespace: rook-ceph -rules: - - apiGroups: - - "" - resources: - - services - - endpoints - - pods - verbs: - - get - - list - - watch -# OLM: END ROLE ---- -# OLM: BEGIN ROLE BINDING -# Allow collection of metrics -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-metrics - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-metrics -subjects: -- kind: ServiceAccount - # change to the serviceaccount and namespace to use for monitoring - name: prometheus-k8s - namespace: rook-ceph -# OLM: END ROLE BINDING ---- -# OLM: BEGIN ROLE -# Allow management of monitoring resources in the mgr -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-monitor-mgr - namespace: rook-ceph -rules: -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - get - - list - - create - - update -# OLM: END ROLE ---- -# OLM: BEGIN ROLE BINDING -# Allow creation of monitoring resources in the mgr -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-monitor-mgr - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-monitor-mgr -subjects: -- kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph -# OLM: END ROLE BINDING ---- diff --git a/cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml b/cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml deleted file mode 100644 index 77e82c959..000000000 --- a/cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: rook-ceph-mgr - namespace: rook-ceph - labels: - team: rook -spec: - namespaceSelector: - matchNames: - - rook-ceph - selector: - matchLabels: - app: rook-ceph-mgr - rook_cluster: 
rook-ceph - ceph_daemon_id: a - endpoints: - - port: http-metrics - path: /metrics - interval: 5s diff --git a/cluster/examples/kubernetes/ceph/nfs-test.yaml b/cluster/examples/kubernetes/ceph/nfs-test.yaml deleted file mode 100644 index 46770bdb6..000000000 --- a/cluster/examples/kubernetes/ceph/nfs-test.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: ceph.rook.io/v1 -kind: CephNFS -metadata: - name: my-nfs - namespace: rook-ceph # namespace:cluster -spec: - rados: - # RADOS pool where NFS client recovery data is stored. - # In this example the data pool for the "myfs" filesystem is used. - # If using the object store example, the data pool would be "my-store.rgw.buckets.data". - pool: myfs-data0 - # RADOS namespace where NFS client recovery data is stored in the pool. - namespace: nfs-ns - # Settings for the NFS server - server: - # the number of active NFS servers - active: 1 - # The logging levels: NIV_NULL | NIV_FATAL | NIV_MAJ | NIV_CRIT | NIV_WARN | NIV_EVENT | NIV_INFO | NIV_DEBUG | NIV_MID_DEBUG |NIV_FULL_DEBUG |NB_LOG_LEVEL - logLevel: NIV_INFO diff --git a/cluster/examples/kubernetes/ceph/nfs.yaml b/cluster/examples/kubernetes/ceph/nfs.yaml deleted file mode 100644 index 86c99a2c5..000000000 --- a/cluster/examples/kubernetes/ceph/nfs.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: ceph.rook.io/v1 -kind: CephNFS -metadata: - name: my-nfs - namespace: rook-ceph # namespace:cluster -spec: - rados: - # RADOS pool where NFS client recovery data is stored, must be a replica pool. EC pools don't support omap which is required by ganesha. - # In this example the data pool for the "myfs" filesystem is used. Separate pool for storing ganesha recovery data is recommended. - # Due to this dashboard issue https://tracker.ceph.com/issues/46176. - # If using the object store example, the data pool would be "my-store.rgw.buckets.data". - pool: myfs-data0 - # RADOS namespace where NFS client recovery data is stored in the pool. - namespace: nfs-ns - # Settings for the NFS server - server: - # the number of active NFS servers - active: 3 - # where to run the NFS server - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - mds-node - # topologySpreadConstraints: - # tolerations: - # - key: mds-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # A key/value list of annotations - annotations: - # key: value - # The requests and limits set here allow the ganesha pod(s) to use half of one CPU core and 1 gigabyte of memory - resources: - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # the priority class to set to influence the scheduler's pod preemption - #priorityClassName: - # The logging levels: NIV_NULL | NIV_FATAL | NIV_MAJ | NIV_CRIT | NIV_WARN | NIV_EVENT | NIV_INFO | NIV_DEBUG | NIV_MID_DEBUG |NIV_FULL_DEBUG |NB_LOG_LEVEL - logLevel: NIV_INFO diff --git a/cluster/examples/kubernetes/ceph/object-bucket-claim-delete.yaml b/cluster/examples/kubernetes/ceph/object-bucket-claim-delete.yaml deleted file mode 100644 index 1b58b8f0f..000000000 --- a/cluster/examples/kubernetes/ceph/object-bucket-claim-delete.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: objectbucket.io/v1alpha1 -kind: ObjectBucketClaim -metadata: - name: ceph-delete-bucket -spec: - # To create a new bucket specify either `bucketName` or - # `generateBucketName` here. Both cannot be used. 
To access - # an existing bucket the bucket name needs to be defined in - # the StorageClass referenced here, and both `bucketName` and - # `generateBucketName` must be omitted in the OBC. - #bucketName: - generateBucketName: ceph-bkt - storageClassName: rook-ceph-delete-bucket - additionalConfig: - # To set for quota for OBC - #maxObjects: "1000" - #maxSize: "2G" diff --git a/cluster/examples/kubernetes/ceph/object-bucket-claim-retain.yaml b/cluster/examples/kubernetes/ceph/object-bucket-claim-retain.yaml deleted file mode 100644 index b4de5c734..000000000 --- a/cluster/examples/kubernetes/ceph/object-bucket-claim-retain.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: objectbucket.io/v1alpha1 -kind: ObjectBucketClaim -metadata: - name: ceph-retain-bucket -spec: - # To create a new bucket specify either `bucketName` or - # `generateBucketName` here. Both cannot be used. To access - # an existing bucket the bucket name needs to be defined in - # the StorageClass referenced here, and both `bucketName` and - # `generateBucketName` must be omitted in the OBC. - #bucketName: - generateBucketName: ceph-bkt - storageClassName: rook-ceph-retain-bucket - additionalConfig: - # To set for quota for OBC - #maxObjects: "1000" - #maxSize: "2G" diff --git a/cluster/examples/kubernetes/ceph/object-ec.yaml b/cluster/examples/kubernetes/ceph/object-ec.yaml deleted file mode 100644 index 08347cf3e..000000000 --- a/cluster/examples/kubernetes/ceph/object-ec.yaml +++ /dev/null @@ -1,92 +0,0 @@ -################################################################################################################# -# Create an object store with settings for erasure coding for the data pool. A minimum of 3 nodes with OSDs are -# required in this example since failureDomain is host. -# kubectl create -f object-ec.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: my-store - namespace: rook-ceph # namespace:cluster -spec: - # The pool spec used to create the metadata pools. Must use replication. - metadataPool: - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - # The pool spec used to create the data pool. Can use replication or erasure coding. 
- dataPool: - failureDomain: host - erasureCoded: - dataChunks: 2 - codingChunks: 1 - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - # Whether to preserve metadata and data pools on object store deletion - preservePoolsOnDelete: true - # The gateway service configuration - gateway: - # A reference to the secret in the rook namespace where the ssl certificate is stored - sslCertificateRef: - # The port that RGW pods will listen on (http) - port: 80 - # The port that RGW pods will listen on (https). An ssl certificate is required. - # securePort: 443 - # The number of pods in the rgw deployment - instances: 1 - # The affinity rules to apply to the rgw deployment or daemonset. - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - rgw-node - # tolerations: - # - key: rgw-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # A key/value list of annotations - annotations: - # key: value - # A key/value list of labels - labels: - # key: value - resources: - # The requests and limits set here, allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # priorityClassName: my-priority-class - #zone: - #name: zone-a - # service endpoint healthcheck - healthCheck: - bucket: - disabled: false - interval: 60s - # Configure the pod liveness probe for the rgw daemon - livenessProbe: - disabled: false diff --git a/cluster/examples/kubernetes/ceph/object-external.yaml b/cluster/examples/kubernetes/ceph/object-external.yaml deleted file mode 100644 index 172b52e7c..000000000 --- a/cluster/examples/kubernetes/ceph/object-external.yaml +++ /dev/null @@ -1,22 +0,0 @@ -################################################################################################################# -# Create an object store with settings for replication in a production environment. A minimum of 3 hosts with -# OSDs are required in this example. -# kubectl create -f object.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: external-store - namespace: rook-ceph # namespace:cluster -spec: - gateway: - # The port on which **ALL** the gateway(s) are listening on. - # Passing a single IP from a load-balancer is also valid. - port: 80 - externalRgwEndpoints: - - ip: 192.168.39.182 - healthCheck: - bucket: - disabled: false - interval: 60s diff --git a/cluster/examples/kubernetes/ceph/object-multisite-pull-realm.yaml b/cluster/examples/kubernetes/ceph/object-multisite-pull-realm.yaml deleted file mode 100644 index 2d678eb9c..000000000 --- a/cluster/examples/kubernetes/ceph/object-multisite-pull-realm.yaml +++ /dev/null @@ -1,63 +0,0 @@ -################################################################################################################# -# Create an object store with settings for a test environment. 
Only a single OSD is required in this example. -# kubectl create -f object-multisite.yaml -################################################################################################################# -apiVersion: v1 -kind: Secret -metadata: - name: realm-a-keys - namespace: myceph -data: - # TODO: Replace with keys for your cluster - # these keys should be the base64 encoded versions of the actual keys or copied from the realm's on the other cluster's secret - access-key: VzFjNFltMVdWRTFJWWxZelZWQT0= - secret-key: WVY1MFIxeExkbG84U3pKdlRseEZXVGR3T3k1U1dUSS9KaTFoUVE9PQ== ---- -apiVersion: ceph.rook.io/v1 -kind: CephObjectRealm -metadata: - name: realm-a - namespace: new-rook-ceph-namespace -spec: - # This endpoint in this section needs is an endpoint from the master zone in the master zone group of realm-a. See object-multisite.md for more details. - pull: - endpoint: http://10.103.133.16:80 ---- -apiVersion: ceph.rook.io/v1 -kind: CephObjectZoneGroup -metadata: - name: zonegroup-a - namespace: new-rook-ceph-namespace -spec: - realm: realm-a ---- -apiVersion: ceph.rook.io/v1 -kind: CephObjectZone -metadata: - name: zone-b - namespace: new-rook-ceph-namespace -spec: - zoneGroup: zonegroup-a - metadataPool: - failureDomain: host - replicated: - size: 1 - requireSafeReplicaSize: true - dataPool: - failureDomain: host - replicated: - size: 1 - requireSafeReplicaSize: true ---- -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: zone-b-multisite-store - namespace: new-rook-ceph-namespace -spec: - gateway: - port: 80 - # securePort: 443 - instances: 1 - zone: - name: zone-b diff --git a/cluster/examples/kubernetes/ceph/object-multisite.yaml b/cluster/examples/kubernetes/ceph/object-multisite.yaml deleted file mode 100644 index 247d825c4..000000000 --- a/cluster/examples/kubernetes/ceph/object-multisite.yaml +++ /dev/null @@ -1,50 +0,0 @@ -################################################################################################################# -# Create an object store with settings for a test environment. Only a single OSD is required in this example. 
-# kubectl create -f object-multisite.yaml -################################################################################################################# -apiVersion: ceph.rook.io/v1 -kind: CephObjectRealm -metadata: - name: realm-a - namespace: rook-ceph # namespace:cluster ---- -apiVersion: ceph.rook.io/v1 -kind: CephObjectZoneGroup -metadata: - name: zonegroup-a - namespace: rook-ceph # namespace:cluster -spec: - realm: realm-a ---- -apiVersion: ceph.rook.io/v1 -kind: CephObjectZone -metadata: - name: zone-a - namespace: rook-ceph # namespace:cluster -spec: - zoneGroup: zonegroup-a - metadataPool: - failureDomain: host - replicated: - size: 1 - requireSafeReplicaSize: true - dataPool: - failureDomain: host - replicated: - size: 1 - requireSafeReplicaSize: true - parameters: - compression_mode: none ---- -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: multisite-store - namespace: rook-ceph # namespace:cluster -spec: - gateway: - port: 80 - # securePort: 443 - instances: 1 - zone: - name: zone-a diff --git a/cluster/examples/kubernetes/ceph/object-openshift.yaml b/cluster/examples/kubernetes/ceph/object-openshift.yaml deleted file mode 100644 index 6fa870446..000000000 --- a/cluster/examples/kubernetes/ceph/object-openshift.yaml +++ /dev/null @@ -1,141 +0,0 @@ -################################################################################################################# -# Create an object store with settings specific for OpenShift. A minimum of 3 OSDs are required in this example -# for the replication since the failureDomain is host. -# oc create -f object-openshift.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: my-store - namespace: rook-ceph # namespace:cluster -spec: - # The pool spec used to create the metadata pools. Must use replication. - metadataPool: - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - # The pool spec used to create the data pool. Can use replication or erasure coding. - dataPool: - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. 
- # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - # Whether to preserve metadata and data pools on object store deletion - preservePoolsOnDelete: true - # The gateway service configuration - gateway: - # A reference to the secret in the rook namespace where the ssl certificate is stored - # sslCertificateRef: - # The port that RGW pods will listen on (http) - port: 8080 - # The port that RGW pods will listen on (https). An ssl certificate is required. - # securePort: 443 - # The number of pods in the rgw deployment - instances: 1 - # The affinity rules to apply to the rgw deployment - placement: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-rgw - # topologyKey: */zone can be used to spread RGW across different AZ - # Use in k8s cluster if your cluster is v1.16 or lower - # Use in k8s cluster is v1.17 or upper - topologyKey: kubernetes.io/hostname - # topologySpreadConstraints: - # tolerations: - # - key: rgw-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # A key/value list of annotations - annotations: - # key: value - # A key/value list of labels - labels: - # key: value - resources: - # The requests and limits set here, allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # priorityClassName: my-priority-class - #zone: - # name: zone-a - # # Configuration for the Kubernetes Service created for the RGW cluster - #service: - # # A key-value list of annotations to apply to the RGW service - # annotations: - # service.beta.openshift.io/serving-cert-secret-name: - # service endpoint healthcheck - healthCheck: - bucket: - disabled: false - interval: 60s - # Configure the pod liveness probe for the rgw daemon - livenessProbe: - disabled: false - # security oriented settings - # security: - # To enable the KMS configuration properly don't forget to uncomment the Secret at the end of the file - # kms: - # # name of the config map containing all the kms connection details - # connectionDetails: - # KMS_PROVIDER: "vault" - # VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e,g: http://vault.my-domain.com:8200 - # VAULT_BACKEND_PATH: "rook" - # VAULT_SECRET_ENGINE: "kv" - # VAULT_BACKEND: v2 - # # name of the secret containing the kms authentication token - # tokenSecretName: rook-vault-token -# # UNCOMMENT THIS TO ENABLE A KMS CONNECTION -# # Also, do not forget to replace both: -# # * ROOK_TOKEN_CHANGE_ME: with a base64 encoded value of the token to use -# # * VAULT_ADDR_CHANGE_ME: with the Vault address -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: rook-vault-token -# namespace: rook-ceph # namespace:cluster -# data: -# token: ROOK_TOKEN_CHANGE_ME ---- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: rook-ceph-rgw-my-store # We recommend name to be the same as the service name 
below, but it is not required - namespace: rook-ceph # namespace:cluster -spec: - to: - kind: Service - name: rook-ceph-rgw-my-store # The name of the RGW service is in the form 'rook-ceph-rgw-' diff --git a/cluster/examples/kubernetes/ceph/object-test.yaml b/cluster/examples/kubernetes/ceph/object-test.yaml deleted file mode 100644 index d53b5f346..000000000 --- a/cluster/examples/kubernetes/ceph/object-test.yaml +++ /dev/null @@ -1,22 +0,0 @@ -################################################################################################################# -# Create an object store with settings for a test environment. Only a single OSD is required in this example. -# kubectl create -f object-test.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: my-store - namespace: rook-ceph # namespace:cluster -spec: - metadataPool: - replicated: - size: 1 - dataPool: - replicated: - size: 1 - preservePoolsOnDelete: false - gateway: - port: 80 - # securePort: 443 - instances: 1 diff --git a/cluster/examples/kubernetes/ceph/object-user.yaml b/cluster/examples/kubernetes/ceph/object-user.yaml deleted file mode 100644 index bf2b6b41f..000000000 --- a/cluster/examples/kubernetes/ceph/object-user.yaml +++ /dev/null @@ -1,13 +0,0 @@ -################################################################################################################# -# Create an object store user for access to the s3 endpoint. -# kubectl create -f object-user.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephObjectStoreUser -metadata: - name: my-user - namespace: rook-ceph # namespace:cluster -spec: - store: my-store - displayName: "my display name" diff --git a/cluster/examples/kubernetes/ceph/object.yaml b/cluster/examples/kubernetes/ceph/object.yaml deleted file mode 100644 index 4fd04a387..000000000 --- a/cluster/examples/kubernetes/ceph/object.yaml +++ /dev/null @@ -1,137 +0,0 @@ -################################################################################################################# -# Create an object store with settings for replication in a production environment. A minimum of 3 hosts with -# OSDs are required in this example. -# kubectl create -f object.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: my-store - namespace: rook-ceph # namespace:cluster -spec: - # The pool spec used to create the metadata pools. Must use replication. - metadataPool: - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - # The pool spec used to create the data pool. Can use replication or erasure coding. 
- dataPool: - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - # Whether to preserve metadata and data pools on object store deletion - preservePoolsOnDelete: false - # The gateway service configuration - gateway: - # A reference to the secret in the rook namespace where the ssl certificate is stored - # sslCertificateRef: - # A reference to the secret in the rook namespace where the ca bundle is stored - # caBundleRef: - # The port that RGW pods will listen on (http) - port: 80 - # The port that RGW pods will listen on (https). An ssl certificate is required. - # securePort: 443 - # The number of pods in the rgw deployment - instances: 1 - # The affinity rules to apply to the rgw deployment. - placement: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-rgw - # topologyKey: */zone can be used to spread RGW across different AZ - # Use in k8s cluster if your cluster is v1.16 or lower - # Use in k8s cluster is v1.17 or upper - topologyKey: kubernetes.io/hostname - # A key/value list of annotations - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - rgw-node - # topologySpreadConstraints: - # tolerations: - # - key: rgw-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # A key/value list of annotations - annotations: - # key: value - # A key/value list of labels - labels: - # key: value - resources: - # The requests and limits set here, allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # priorityClassName: my-priority-class - #zone: - #name: zone-a - # service endpoint healthcheck - healthCheck: - bucket: - disabled: false - interval: 60s - # Configure the pod liveness probe for the rgw daemon - livenessProbe: - disabled: false - # security oriented settings - # security: - # To enable the KMS configuration properly don't forget to uncomment the Secret at the end of the file - # kms: - # # name of the config map containing all the kms connection details - # connectionDetails: - # KMS_PROVIDER: "vault" - # VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e,g: http://vault.my-domain.com:8200 - # VAULT_BACKEND_PATH: "rook" - # VAULT_SECRET_ENGINE: "kv" - # VAULT_BACKEND: v2 - # # name of the secret containing the kms authentication token - # tokenSecretName: rook-vault-token -# # UNCOMMENT THIS TO ENABLE A KMS CONNECTION -# # Also, do not forget to replace both: -# # * ROOK_TOKEN_CHANGE_ME: with a base64 encoded value of the token to use -# # * VAULT_ADDR_CHANGE_ME: with the Vault address -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: rook-vault-token -# 
namespace: rook-ceph # namespace:cluster -# data: -# token: ROOK_TOKEN_CHANGE_ME diff --git a/cluster/examples/kubernetes/ceph/operator-openshift.yaml b/cluster/examples/kubernetes/ceph/operator-openshift.yaml deleted file mode 100644 index d3cabecbf..000000000 --- a/cluster/examples/kubernetes/ceph/operator-openshift.yaml +++ /dev/null @@ -1,580 +0,0 @@ -################################################################################################################# -# Create the rook operator and necessary security context constraints for running -# Rook in an OpenShift cluster. -# For example, to create the rook-ceph cluster: -# oc create -f crds.yaml -f common.yaml -f operator-openshift.yaml -# oc create -f cluster.yaml -################################################################################################################# -# scc for the Rook and Ceph daemons -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-ceph -allowPrivilegedContainer: true -allowHostNetwork: true -allowHostDirVolumePlugin: true -priority: -allowedCapabilities: [] -allowHostPorts: true -allowHostPID: true # remove this once we drop support for Nautilus -allowHostIPC: true -readOnlyRootFilesystem: false -requiredDropCapabilities: [] -defaultAddCapabilities: [] -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -fsGroup: - type: MustRunAs -supplementalGroups: - type: RunAsAny -allowedFlexVolumes: - - driver: "ceph.rook.io/rook" - - driver: "ceph.rook.io/rook-ceph" -volumes: - - configMap - - downwardAPI - - emptyDir - - flexVolume - - hostPath - - persistentVolumeClaim - - projected - - secret -users: - # A user needs to be added for each rook service account. - # This assumes running in the default sample "rook-ceph" namespace. - # If other namespaces or service accounts are configured, they need to be updated here. - - system:serviceaccount:rook-ceph:rook-ceph-system # serviceaccount:namespace:operator - - system:serviceaccount:rook-ceph:default # serviceaccount:namespace:cluster - - system:serviceaccount:rook-ceph:rook-ceph-mgr # serviceaccount:namespace:cluster - - system:serviceaccount:rook-ceph:rook-ceph-osd # serviceaccount:namespace:cluster ---- -# scc for the CSI driver -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-ceph-csi -allowPrivilegedContainer: true -allowHostNetwork: true -allowHostDirVolumePlugin: true -priority: -allowedCapabilities: ["*"] -allowHostPorts: true -allowHostPID: true -allowHostIPC: true -readOnlyRootFilesystem: false -requiredDropCapabilities: [] -defaultAddCapabilities: [] -runAsUser: - type: RunAsAny -seLinuxContext: - type: RunAsAny -fsGroup: - type: RunAsAny -supplementalGroups: - type: RunAsAny -allowedFlexVolumes: - - driver: "ceph.rook.io/rook" - - driver: "ceph.rook.io/rook-ceph" -volumes: ["*"] -users: - # A user needs to be added for each rook service account. - # This assumes running in the default sample "rook-ceph" namespace. - # If other namespaces or service accounts are configured, they need to be updated here. 
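As a sketch of that per-namespace update: if the operator were deployed in a different namespace, say a hypothetical my-rook, the entries below would reference that namespace instead, for example:

  - system:serviceaccount:my-rook:rook-csi-rbd-plugin-sa
  - system:serviceaccount:my-rook:rook-csi-rbd-provisioner-sa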
- - system:serviceaccount:rook-ceph:rook-csi-rbd-plugin-sa # serviceaccount:namespace:operator - - system:serviceaccount:rook-ceph:rook-csi-rbd-provisioner-sa # serviceaccount:namespace:operator - - system:serviceaccount:rook-ceph:rook-csi-cephfs-plugin-sa # serviceaccount:namespace:operator - - system:serviceaccount:rook-ceph:rook-csi-cephfs-provisioner-sa # serviceaccount:namespace:operator ---- -# Rook Ceph Operator Config -# Use this ConfigMap to override operator configurations -# Precedence will be given to this config in case Env Var also exists for the same -# -kind: ConfigMap -apiVersion: v1 -metadata: - name: rook-ceph-operator-config - # should be in the namespace of the operator - namespace: rook-ceph # namespace:operator -data: - # The logging level for the operator: INFO | DEBUG - ROOK_LOG_LEVEL: "INFO" - - # Enable the CSI driver. - # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml - ROOK_CSI_ENABLE_CEPHFS: "true" - # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below. - ROOK_CSI_ENABLE_RBD: "true" - ROOK_CSI_ENABLE_GRPC_METRICS: "false" - - # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary - # in some network configurations where the SDN does not provide access to an external cluster or - # there is significant drop in read/write performance. - # CSI_ENABLE_HOST_NETWORK: "true" - - # Set logging level for csi containers. - # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. - # CSI_LOG_LEVEL: "0" - - # OMAP generator generates the omap mapping between the PV name and the RBD image - # which helps CSI to identify the rbd images for CSI operations. - # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature. - # By default OMAP generator is disabled and when enabled it willbe deployed as a - # sidecar with CSI provisioner pod, to enable set it to true. - # CSI_ENABLE_OMAP_GENERATOR: "true" - - # set to false to disable deployment of snapshotter container in CephFS provisioner pod. - CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true" - - # set to false to disable deployment of snapshotter container in RBD provisioner pod. - CSI_ENABLE_RBD_SNAPSHOTTER: "true" - - # Enable Ceph Kernel clients on kernel < 4.17 which support quotas for Cephfs - # If you disable the kernel client, your application may be disrupted during upgrade. - # See the upgrade guide: https://rook.io/docs/rook/master/ceph-upgrade.html - CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true" - - # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted. - # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - CSI_RBD_FSGROUPPOLICY: "ReadWriteOnceWithFSType" - - # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. - # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - CSI_CEPHFS_FSGROUPPOLICY: "None" - - # (Optional) Allow starting unsupported ceph-csi image - ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false" - # The default version of CSI supported by Rook will be started. To change the version - # of the CSI driver to something other than what is officially supported, change - # these images to the desired release of the CSI driver. 
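For instance, pinning the ceph-csi image to an explicit release means uncommenting the corresponding key and setting it to the desired tag; a minimal sketch using the default tag listed just below:

  ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0"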
- # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0" - # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" - # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" - # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" - # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" - # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" - - # (Optional) set user created priorityclassName for csi plugin pods. - # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical" - - # (Optional) set user created priorityclassName for csi provisioner pods. - # CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical" - - # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. - # Default value is RollingUpdate. - # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete" - # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. - # Default value is RollingUpdate. - # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete" - - # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path. - # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet" - - # Labels to add to the CSI CephFS Deployments and DaemonSets Pods. - # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2" - # Labels to add to the CSI RBD Deployments and DaemonSets Pods. - # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2" - - # (Optional) CephCSI provisioner NodeAffinity(applied to both CephFS and RBD provisioner). - # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" - # (Optional) CephCSI provisioner tolerations list(applied to both CephFS and RBD provisioner). - # Put here list of taints you want to tolerate in YAML format. - # CSI provisioner would be best to start on the same nodes as other ceph daemons. - # CSI_PROVISIONER_TOLERATIONS: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # (Optional) CephCSI plugin NodeAffinity(applied to both CephFS and RBD plugin). - # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" - # (Optional) CephCSI plugin tolerations list(applied to both CephFS and RBD plugin). - # Put here list of taints you want to tolerate in YAML format. - # CSI plugins need to be started on all the nodes where the clients need to mount the storage. - # CSI_PLUGIN_TOLERATIONS: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - - # (Optional) CephCSI RBD provisioner NodeAffinity(if specified, overrides CSI_PROVISIONER_NODE_AFFINITY). - # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node" - # (Optional) CephCSI RBD provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS). - # Put here list of taints you want to tolerate in YAML format. - # CSI provisioner would be best to start on the same nodes as other ceph daemons. - # CSI_RBD_PROVISIONER_TOLERATIONS: | - # - key: node.rook.io/rbd - # operator: Exists - # (Optional) CephCSI RBD plugin NodeAffinity(if specified, overrides CSI_PLUGIN_NODE_AFFINITY). - # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node" - # (Optional) CephCSI RBD plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS). - # Put here list of taints you want to tolerate in YAML format. 
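A sketch of that multi-line YAML value, assuming a hypothetical taint key of storage-node on dedicated storage nodes:

  CSI_RBD_PLUGIN_TOLERATIONS: |
    - key: storage-node
      operator: Exists
      effect: NoSchedule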
- # CSI plugins need to be started on all the nodes where the clients need to mount the storage. - # CSI_RBD_PLUGIN_TOLERATIONS: | - # - key: node.rook.io/rbd - # operator: Exists - - # (Optional) CephCSI CephFS provisioner NodeAffinity(if specified, overrides CSI_PROVISIONER_NODE_AFFINITY). - # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node" - # (Optional) CephCSI CephFS provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS). - # Put here list of taints you want to tolerate in YAML format. - # CSI provisioner would be best to start on the same nodes as other ceph daemons. - # CSI_CEPHFS_PROVISIONER_TOLERATIONS: | - # - key: node.rook.io/cephfs - # operator: Exists - # (Optional) CephCSI CephFS plugin NodeAffinity(if specified, overrides CSI_PLUGIN_NODE_AFFINITY). - # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node" - # (Optional) CephCSI CephFS plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS). - # Put here list of taints you want to tolerate in YAML format. - # CSI plugins need to be started on all the nodes where the clients need to mount the storage. - # CSI_CEPHFS_PLUGIN_TOLERATIONS: | - # - key: node.rook.io/cephfs - # operator: Exists - - # (Optional) CEPH CSI RBD provisioner resource requirement list, Put here list of resource - # requests and limits you want to apply for provisioner pod - # CSI_RBD_PROVISIONER_RESOURCE: | - # - name : csi-provisioner - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-resizer - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-attacher - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-snapshotter - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-rbdplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI RBD plugin resource requirement list, Put here list of resource - # requests and limits you want to apply for plugin pod - # CSI_RBD_PLUGIN_RESOURCE: | - # - name : driver-registrar - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # - name : csi-rbdplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI CephFS provisioner resource requirement list, Put here list of resource - # requests and limits you want to apply for provisioner pod - # CSI_CEPHFS_PROVISIONER_RESOURCE: | - # - name : csi-provisioner - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-resizer - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-attacher - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-cephfsplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # 
requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI CephFS plugin resource requirement list, Put here list of resource - # requests and limits you want to apply for plugin pod - # CSI_CEPHFS_PLUGIN_RESOURCE: | - # - name : driver-registrar - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # - name : csi-cephfsplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - - # Configure CSI Ceph FS grpc and liveness metrics port - # CSI_CEPHFS_GRPC_METRICS_PORT: "9091" - # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081" - # Configure CSI RBD grpc and liveness metrics port - # CSI_RBD_GRPC_METRICS_PORT: "9090" - # CSI_RBD_LIVENESS_METRICS_PORT: "9080" - - # Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used - ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true" - - # Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release - # in favor of the CSI driver. - ROOK_ENABLE_FLEX_DRIVER: "false" - # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster. - # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs. - ROOK_ENABLE_DISCOVERY_DAEMON: "false" - # Enable volume replication controller - CSI_ENABLE_VOLUME_REPLICATION: "false" - # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15. - ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" - # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" - - # (Optional) Admission controller NodeAffinity. - # ADMISSION_CONTROLLER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" - # (Optional) Admission controller tolerations list. Put here list of taints you want to tolerate in YAML format. - # Admission controller would be best to start on the same nodes as other ceph daemons. - # ADMISSION_CONTROLLER_TOLERATIONS: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists ---- -# The deployment for the rook operator -# OLM: BEGIN OPERATOR DEPLOYMENT -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-ceph-operator - namespace: rook-ceph # namespace:operator - labels: - operator: rook - storage-backend: ceph -spec: - selector: - matchLabels: - app: rook-ceph-operator - replicas: 1 - template: - metadata: - labels: - app: rook-ceph-operator - spec: - serviceAccountName: rook-ceph-system - containers: - - name: rook-ceph-operator - image: rook/ceph:v1.7.2 - args: ["ceph", "operator"] - volumeMounts: - - mountPath: /var/lib/rook - name: rook-config - - mountPath: /etc/ceph - name: default-config-dir - env: - - name: ROOK_CURRENT_NAMESPACE_ONLY - value: "false" - # Rook Agent toleration. Will tolerate all taints with all keys. - # Choose between NoSchedule, PreferNoSchedule and NoExecute: - # - name: AGENT_TOLERATION - # value: "NoSchedule" - # (Optional) Rook Agent toleration key. 
Set this to the key of the taint you want to tolerate - # - name: AGENT_TOLERATION_KEY - # value: "" - # (Optional) Rook Agent priority class name to set on the pod(s) - # - name: AGENT_PRIORITY_CLASS_NAME - # value: "" - # (Optional) Rook Agent NodeAffinity. - # - name: AGENT_NODE_AFFINITY - # value: "role=storage-node; storage=rook,ceph" - # (Optional) Rook Agent mount security mode. Can by `Any` or `Restricted`. - # `Any` uses Ceph admin credentials by default/fallback. - # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and - # set `mountUser` to the Ceph user, `mountSecret` to the Kubernetes secret name. - # to the namespace in which the `mountSecret` Kubernetes secret namespace. - # - name: AGENT_MOUNT_SECURITY_MODE - # value: "Any" - # Set the path where the Rook agent can find the flex volumes - - name: FLEXVOLUME_DIR_PATH - value: "/etc/kubernetes/kubelet-plugins/volume/exec" - # Set the path where kernel modules can be found - # - name: LIB_MODULES_DIR_PATH - # value: "" - # Mount any extra directories into the agent container - # - name: AGENT_MOUNTS - # value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2" - # Rook Discover toleration. Will tolerate all taints with all keys. - # Choose between NoSchedule, PreferNoSchedule and NoExecute: - # - name: DISCOVER_TOLERATION - # value: "NoSchedule" - # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate - # - name: DISCOVER_TOLERATION_KEY - # value: "" - # (Optional) Rook Discover priority class name to set on the pod(s) - # - name: DISCOVER_PRIORITY_CLASS_NAME - # value: "" - # (Optional) Discover Agent NodeAffinity. - # - name: DISCOVER_AGENT_NODE_AFFINITY - # value: "role=storage-node; storage=rook, ceph" - # (Optional) Discover Agent Pod Labels. - # - name: DISCOVER_AGENT_POD_LABELS - # value: "key1=value1,key2=value2" - - # The duration between discovering devices in the rook-discover daemonset. - - name: ROOK_DISCOVER_DEVICES_INTERVAL - value: "60m" - # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods. - # Set this to true if SELinux is enabled (e.g. OpenShift) to workaround the anyuid issues. - # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641 - - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED - value: "true" - # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins). - # Disable it here if you have similar issues. - # For more details see https://github.com/rook/rook/issues/2417 - - name: ROOK_ENABLE_SELINUX_RELABELING - value: "true" - # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues. - # For more details see https://github.com/rook/rook/issues/2254 - - name: ROOK_ENABLE_FSGROUP - value: "true" - # Disable automatic orchestration when new devices are discovered - - name: ROOK_DISABLE_DEVICE_HOTPLUG - value: "false" - # Provide customised regex as the values using comma. For eg. regex for rbd based volume, value will be like "(?i)rbd[0-9]+". - # In case of more than one regex, use comma to separate between them. - # Default regex will be "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" - # add regex expression after putting a comma to blacklist a disk - # If value is empty, the default regex will be used. 
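For example, to additionally ignore loop devices, the default value set just below could be extended with one more comma-separated pattern (the loop entry is illustrative):

  - name: DISCOVER_DAEMON_UDEV_BLACKLIST
    value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+,(?i)loop[0-9]+"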
- - name: DISCOVER_DAEMON_UDEV_BLACKLIST - value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" - - # Whether to start machineDisruptionBudget and machineLabel controller to watch for the osd pods and MDBs. - - name: ROOK_ENABLE_MACHINE_DISRUPTION_BUDGET - value: "false" - - # Time to wait until the node controller will move Rook pods to other - # nodes after detecting an unreachable node. - # Pods affected by this setting are: - # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox - # The value used in this variable replaces the default value of 300 secs - # added automatically by k8s as Toleration for - # - # The total amount of time to reschedule Rook pods in healthy nodes - # before detecting a condition will be the sum of: - # --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag) - # --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds - - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS - value: "5" - - # The name of the node to pass with the downward API - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # The pod name to pass with the downward API - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - # The pod namespace to pass with the downward API - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - # Recommended resource requests and limits, if desired - #resources: - # limits: - # cpu: 500m - # memory: 256Mi - # requests: - # cpu: 100m - # memory: 128Mi - - # Uncomment it to run lib bucket provisioner in multithreaded mode - #- name: LIB_BUCKET_PROVISIONER_THREADS - # value: "5" - - volumes: - - name: rook-config - emptyDir: {} - - name: default-config-dir - emptyDir: {} -# OLM: END OPERATOR DEPLOYMENT diff --git a/cluster/examples/kubernetes/ceph/operator.yaml b/cluster/examples/kubernetes/ceph/operator.yaml deleted file mode 100644 index ffcb8d350..000000000 --- a/cluster/examples/kubernetes/ceph/operator.yaml +++ /dev/null @@ -1,525 +0,0 @@ -################################################################################################################# -# The deployment for the rook operator -# Contains the common settings for most Kubernetes deployments. -# For example, to create the rook-ceph cluster: -# kubectl create -f crds.yaml -f common.yaml -f operator.yaml -# kubectl create -f cluster.yaml -# -# Also see other operator sample files for variations of operator.yaml: -# - operator-openshift.yaml: Common settings for running in OpenShift -############################################################################################################### - -# Rook Ceph Operator Config ConfigMap -# Use this ConfigMap to override Rook-Ceph Operator configurations. -# NOTE! Precedence will be given to this config if the same Env Var config also exists in the -# Operator Deployment. -# To move a configuration(s) from the Operator Deployment to this ConfigMap, add the config -# here. It is recommended to then remove it from the Deployment to eliminate any future confusion. -kind: ConfigMap -apiVersion: v1 -metadata: - name: rook-ceph-operator-config - # should be in the namespace of the operator - namespace: rook-ceph # namespace:operator -data: - # The logging level for the operator: INFO | DEBUG - ROOK_LOG_LEVEL: "INFO" - - # Enable the CSI driver. - # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml - ROOK_CSI_ENABLE_CEPHFS: "true" - # Enable the default version of the CSI RBD driver. 
To start another version of the CSI driver, see image properties below. - ROOK_CSI_ENABLE_RBD: "true" - ROOK_CSI_ENABLE_GRPC_METRICS: "false" - - # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary - # in some network configurations where the SDN does not provide access to an external cluster or - # there is significant drop in read/write performance. - # CSI_ENABLE_HOST_NETWORK: "true" - - # Set logging level for csi containers. - # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. - # CSI_LOG_LEVEL: "0" - - # OMAP generator will generate the omap mapping between the PV name and the RBD image. - # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature. - # By default OMAP generator sidecar is deployed with CSI provisioner pod, to disable - # it set it to false. - # CSI_ENABLE_OMAP_GENERATOR: "false" - - # set to false to disable deployment of snapshotter container in CephFS provisioner pod. - CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true" - - # set to false to disable deployment of snapshotter container in RBD provisioner pod. - CSI_ENABLE_RBD_SNAPSHOTTER: "true" - - # Enable cephfs kernel driver instead of ceph-fuse. - # If you disable the kernel client, your application may be disrupted during upgrade. - # See the upgrade guide: https://rook.io/docs/rook/master/ceph-upgrade.html - # NOTE! cephfs quota is not supported in kernel version < 4.17 - CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true" - - # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted. - # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - CSI_RBD_FSGROUPPOLICY: "ReadWriteOnceWithFSType" - - # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. - # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - CSI_CEPHFS_FSGROUPPOLICY: "None" - - # (Optional) Allow starting unsupported ceph-csi image - ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false" - # The default version of CSI supported by Rook will be started. To change the version - # of the CSI driver to something other than what is officially supported, change - # these images to the desired release of the CSI driver. - # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0" - # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" - # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" - # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" - # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" - # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" - - # (Optional) set user created priorityclassName for csi plugin pods. - # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical" - - # (Optional) set user created priorityclassName for csi provisioner pods. - # CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical" - - # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. - # Default value is RollingUpdate. - # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete" - # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. - # Default value is RollingUpdate. - # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete" - - # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path. 
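If kubelet runs with a non-default root directory, the key below would be uncommented and pointed at it; a sketch with an illustrative path:

  ROOK_CSI_KUBELET_DIR_PATH: "/data/kubelet"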
- # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet" - - # Labels to add to the CSI CephFS Deployments and DaemonSets Pods. - # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2" - # Labels to add to the CSI RBD Deployments and DaemonSets Pods. - # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2" - - # (Optional) CephCSI provisioner NodeAffinity(applied to both CephFS and RBD provisioner). - # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" - # (Optional) CephCSI provisioner tolerations list(applied to both CephFS and RBD provisioner). - # Put here list of taints you want to tolerate in YAML format. - # CSI provisioner would be best to start on the same nodes as other ceph daemons. - # CSI_PROVISIONER_TOLERATIONS: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # (Optional) CephCSI plugin NodeAffinity(applied to both CephFS and RBD plugin). - # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" - # (Optional) CephCSI plugin tolerations list(applied to both CephFS and RBD plugin). - # Put here list of taints you want to tolerate in YAML format. - # CSI plugins need to be started on all the nodes where the clients need to mount the storage. - # CSI_PLUGIN_TOLERATIONS: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - - # (Optional) CephCSI RBD provisioner NodeAffinity(if specified, overrides CSI_PROVISIONER_NODE_AFFINITY). - # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node" - # (Optional) CephCSI RBD provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS). - # Put here list of taints you want to tolerate in YAML format. - # CSI provisioner would be best to start on the same nodes as other ceph daemons. - # CSI_RBD_PROVISIONER_TOLERATIONS: | - # - key: node.rook.io/rbd - # operator: Exists - # (Optional) CephCSI RBD plugin NodeAffinity(if specified, overrides CSI_PLUGIN_NODE_AFFINITY). - # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node" - # (Optional) CephCSI RBD plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS). - # Put here list of taints you want to tolerate in YAML format. - # CSI plugins need to be started on all the nodes where the clients need to mount the storage. - # CSI_RBD_PLUGIN_TOLERATIONS: | - # - key: node.rook.io/rbd - # operator: Exists - - # (Optional) CephCSI CephFS provisioner NodeAffinity(if specified, overrides CSI_PROVISIONER_NODE_AFFINITY). - # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node" - # (Optional) CephCSI CephFS provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS). - # Put here list of taints you want to tolerate in YAML format. - # CSI provisioner would be best to start on the same nodes as other ceph daemons. - # CSI_CEPHFS_PROVISIONER_TOLERATIONS: | - # - key: node.rook.io/cephfs - # operator: Exists - # (Optional) CephCSI CephFS plugin NodeAffinity(if specified, overrides CSI_PLUGIN_NODE_AFFINITY). - # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node" - # (Optional) CephCSI CephFS plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS). - # Put here list of taints you want to tolerate in YAML format. - # CSI plugins need to be started on all the nodes where the clients need to mount the storage. 
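A combined sketch of the override precedence described above, with label values taken from the commented examples and purely illustrative: the CephFS-specific key wins for the CephFS plugin, while other plugins fall back to the generic one.

  CSI_PLUGIN_NODE_AFFINITY: "role=storage-node"
  CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"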
- # CSI_CEPHFS_PLUGIN_TOLERATIONS: | - # - key: node.rook.io/cephfs - # operator: Exists - - # (Optional) CEPH CSI RBD provisioner resource requirement list, Put here list of resource - # requests and limits you want to apply for provisioner pod - # CSI_RBD_PROVISIONER_RESOURCE: | - # - name : csi-provisioner - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-resizer - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-attacher - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-snapshotter - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-rbdplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI RBD plugin resource requirement list, Put here list of resource - # requests and limits you want to apply for plugin pod - # CSI_RBD_PLUGIN_RESOURCE: | - # - name : driver-registrar - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # - name : csi-rbdplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI CephFS provisioner resource requirement list, Put here list of resource - # requests and limits you want to apply for provisioner pod - # CSI_CEPHFS_PROVISIONER_RESOURCE: | - # - name : csi-provisioner - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-resizer - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-attacher - # resource: - # requests: - # memory: 128Mi - # cpu: 100m - # limits: - # memory: 256Mi - # cpu: 200m - # - name : csi-cephfsplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # (Optional) CEPH CSI CephFS plugin resource requirement list, Put here list of resource - # requests and limits you want to apply for plugin pod - # CSI_CEPHFS_PLUGIN_RESOURCE: | - # - name : driver-registrar - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - # - name : csi-cephfsplugin - # resource: - # requests: - # memory: 512Mi - # cpu: 250m - # limits: - # memory: 1Gi - # cpu: 500m - # - name : liveness-prometheus - # resource: - # requests: - # memory: 128Mi - # cpu: 50m - # limits: - # memory: 256Mi - # cpu: 100m - - # Configure CSI CSI Ceph FS grpc and liveness metrics port - # CSI_CEPHFS_GRPC_METRICS_PORT: "9091" - # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081" - # Configure CSI RBD grpc and liveness metrics port - # CSI_RBD_GRPC_METRICS_PORT: "9090" - # CSI_RBD_LIVENESS_METRICS_PORT: "9080" - - # Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used - ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true" - - # 
Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release - # in favor of the CSI driver. - ROOK_ENABLE_FLEX_DRIVER: "false" - # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster. - # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs. - ROOK_ENABLE_DISCOVERY_DAEMON: "false" - # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15. - ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" - # Enable volume replication controller - CSI_ENABLE_VOLUME_REPLICATION: "false" - # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" - - # (Optional) Admission controller NodeAffinity. - # ADMISSION_CONTROLLER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" - # (Optional) Admission controller tolerations list. Put here list of taints you want to tolerate in YAML format. - # Admission controller would be best to start on the same nodes as other ceph daemons. - # ADMISSION_CONTROLLER_TOLERATIONS: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists ---- -# OLM: BEGIN OPERATOR DEPLOYMENT -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-ceph-operator - namespace: rook-ceph # namespace:operator - labels: - operator: rook - storage-backend: ceph -spec: - selector: - matchLabels: - app: rook-ceph-operator - replicas: 1 - template: - metadata: - labels: - app: rook-ceph-operator - spec: - serviceAccountName: rook-ceph-system - containers: - - name: rook-ceph-operator - image: rook/ceph:v1.7.2 - args: ["ceph", "operator"] - volumeMounts: - - mountPath: /var/lib/rook - name: rook-config - - mountPath: /etc/ceph - name: default-config-dir - env: - # If the operator should only watch for cluster CRDs in the same namespace, set this to "true". - # If this is not set to true, the operator will watch for cluster CRDs in all namespaces. - - name: ROOK_CURRENT_NAMESPACE_ONLY - value: "false" - # Rook Agent toleration. Will tolerate all taints with all keys. - # Choose between NoSchedule, PreferNoSchedule and NoExecute: - # - name: AGENT_TOLERATION - # value: "NoSchedule" - # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate - # - name: AGENT_TOLERATION_KEY - # value: "" - # (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format. - # - name: AGENT_TOLERATIONS - # value: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # (Optional) Rook Agent priority class name to set on the pod(s) - # - name: AGENT_PRIORITY_CLASS_NAME - # value: "" - # (Optional) Rook Agent NodeAffinity. - # - name: AGENT_NODE_AFFINITY - # value: "role=storage-node; storage=rook,ceph" - # (Optional) Rook Agent mount security mode. Can by `Any` or `Restricted`. - # `Any` uses Ceph admin credentials by default/fallback. - # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and - # set `mountUser` to the Ceph user, `mountSecret` to the Kubernetes secret name. - # to the namespace in which the `mountSecret` Kubernetes secret namespace. 
- # - name: AGENT_MOUNT_SECURITY_MODE - # value: "Any" - # Set the path where the Rook agent can find the flex volumes - # - name: FLEXVOLUME_DIR_PATH - # value: "" - # Set the path where kernel modules can be found - # - name: LIB_MODULES_DIR_PATH - # value: "" - # Mount any extra directories into the agent container - # - name: AGENT_MOUNTS - # value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2" - # Rook Discover toleration. Will tolerate all taints with all keys. - # Choose between NoSchedule, PreferNoSchedule and NoExecute: - # - name: DISCOVER_TOLERATION - # value: "NoSchedule" - # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate - # - name: DISCOVER_TOLERATION_KEY - # value: "" - # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format. - # - name: DISCOVER_TOLERATIONS - # value: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # (Optional) Rook Discover priority class name to set on the pod(s) - # - name: DISCOVER_PRIORITY_CLASS_NAME - # value: "" - # (Optional) Discover Agent NodeAffinity. - # - name: DISCOVER_AGENT_NODE_AFFINITY - # value: "role=storage-node; storage=rook, ceph" - # (Optional) Discover Agent Pod Labels. - # - name: DISCOVER_AGENT_POD_LABELS - # value: "key1=value1,key2=value2" - - # The duration between discovering devices in the rook-discover daemonset. - - name: ROOK_DISCOVER_DEVICES_INTERVAL - value: "60m" - - # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods. - # Set this to true if SELinux is enabled (e.g. OpenShift) to workaround the anyuid issues. - # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641 - - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED - value: "false" - - # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins). - # Disable it here if you have similar issues. - # For more details see https://github.com/rook/rook/issues/2417 - - name: ROOK_ENABLE_SELINUX_RELABELING - value: "true" - - # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues. - # For more details see https://github.com/rook/rook/issues/2254 - - name: ROOK_ENABLE_FSGROUP - value: "true" - - # Disable automatic orchestration when new devices are discovered - - name: ROOK_DISABLE_DEVICE_HOTPLUG - value: "false" - - # Provide customised regex as the values using comma. For eg. regex for rbd based volume, value will be like "(?i)rbd[0-9]+". - # In case of more than one regex, use comma to separate between them. - # Default regex will be "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" - # Add regex expression after putting a comma to blacklist a disk - # If value is empty, the default regex will be used. - - name: DISCOVER_DAEMON_UDEV_BLACKLIST - value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" - - # Time to wait until the node controller will move Rook pods to other - # nodes after detecting an unreachable node. 
- # Pods affected by this setting are: - # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox - # The value used in this variable replaces the default value of 300 secs - # added automatically by k8s as Toleration for - # - # The total amount of time to reschedule Rook pods in healthy nodes - # before detecting a condition will be the sum of: - # --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag) - # --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds - - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS - value: "5" - - # The name of the node to pass with the downward API - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # The pod name to pass with the downward API - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - # The pod namespace to pass with the downward API - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - # Recommended resource requests and limits, if desired - #resources: - # limits: - # cpu: 500m - # memory: 256Mi - # requests: - # cpu: 100m - # memory: 128Mi - - # Uncomment it to run lib bucket provisioner in multithreaded mode - #- name: LIB_BUCKET_PROVISIONER_THREADS - # value: "5" - - # Uncomment it to run rook operator on the host network - #hostNetwork: true - volumes: - - name: rook-config - emptyDir: {} - - name: default-config-dir - emptyDir: {} -# OLM: END OPERATOR DEPLOYMENT diff --git a/cluster/examples/kubernetes/ceph/osd-purge.yaml b/cluster/examples/kubernetes/ceph/osd-purge.yaml deleted file mode 100644 index a0f21d20a..000000000 --- a/cluster/examples/kubernetes/ceph/osd-purge.yaml +++ /dev/null @@ -1,74 +0,0 @@ -################################################################################################################# -# We need many operations to remove OSDs as written in Documentation/ceph-osd-mgmt.md. -# This job can automate some of that operations: mark OSDs as `out`, purge these OSDs, -# and delete the corresponding resources like OSD deployments, OSD prepare jobs, and PVCs. -# -# Please note the following. -# -# - This job only works for `down` OSDs. -# - This job doesn't wait for backfilling to be completed. -# -# If you want to remove `up` OSDs and/or want to wait for backfilling to be completed between each OSD removal, -# please do it by hand. -################################################################################################################# - -apiVersion: batch/v1 -kind: Job -metadata: - name: rook-ceph-purge-osd - namespace: rook-ceph # namespace:operator - labels: - app: rook-ceph-purge-osd -spec: - template: - spec: - serviceAccountName: rook-ceph-purge-osd - containers: - - name: osd-removal - image: rook/ceph:v1.7.2 - # TODO: Insert the OSD ID in the last parameter that is to be removed - # The OSD IDs are a comma-separated list. For example: "0" or "0,2". - # If you want to preserve the OSD PVCs, set `--preserve-pvc true`. 
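A filled-in sketch of the args line that follows, assuming OSDs 0 and 2 are the ones to purge and their PVCs should be kept:

        args: ["ceph", "osd", "remove", "--preserve-pvc", "true", "--osd-ids", "0,2"]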
- args: ["ceph", "osd", "remove", "--preserve-pvc", "false", "--osd-ids", ""] - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: ROOK_MON_ENDPOINTS - valueFrom: - configMapKeyRef: - key: data - name: rook-ceph-mon-endpoints - - name: ROOK_CEPH_USERNAME - valueFrom: - secretKeyRef: - key: ceph-username - name: rook-ceph-mon - - name: ROOK_CEPH_SECRET - valueFrom: - secretKeyRef: - key: ceph-secret - name: rook-ceph-mon - - name: ROOK_CONFIG_DIR - value: /var/lib/rook - - name: ROOK_CEPH_CONFIG_OVERRIDE - value: /etc/rook/config/override.conf - - name: ROOK_FSID - valueFrom: - secretKeyRef: - key: fsid - name: rook-ceph-mon - - name: ROOK_LOG_LEVEL - value: DEBUG - volumeMounts: - - mountPath: /etc/ceph - name: ceph-conf-emptydir - - mountPath: /var/lib/rook - name: rook-config - volumes: - - emptyDir: {} - name: ceph-conf-emptydir - - emptyDir: {} - name: rook-config - restartPolicy: Never diff --git a/cluster/examples/kubernetes/ceph/pool-ec.yaml b/cluster/examples/kubernetes/ceph/pool-ec.yaml deleted file mode 100644 index d98f964e5..000000000 --- a/cluster/examples/kubernetes/ceph/pool-ec.yaml +++ /dev/null @@ -1,26 +0,0 @@ -################################################################################################################# -# Create a Ceph pool with settings for erasure coding. A minimum of 3 OSDs are required in this example. -# kubectl create -f pool-ec.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: ec-pool - namespace: rook-ceph # namespace:cluster -spec: - # The failure domain will spread the replicas of the data across different failure zones - failureDomain: osd - # Make sure you have enough OSDs to support the replica size or sum of the erasure coding and data chunks. - # This is the minimal example that requires only 3 OSDs. - erasureCoded: - dataChunks: 2 - codingChunks: 1 - # Set any property on a given pool - # see https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values - parameters: - # Inline compression mode for the data pool - compression_mode: none - # A key/value list of annotations - annotations: - # key: value diff --git a/cluster/examples/kubernetes/ceph/pool-test.yaml b/cluster/examples/kubernetes/ceph/pool-test.yaml deleted file mode 100644 index 301271d55..000000000 --- a/cluster/examples/kubernetes/ceph/pool-test.yaml +++ /dev/null @@ -1,14 +0,0 @@ -################################################################################################################# -# Create a Ceph pool with settings a test environment. Only a single OSD is required. -# kubectl create -f pool-test.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph # namespace:cluster -spec: - failureDomain: osd - replicated: - size: 1 diff --git a/cluster/examples/kubernetes/ceph/pool.yaml b/cluster/examples/kubernetes/ceph/pool.yaml deleted file mode 100644 index da3c7ebd3..000000000 --- a/cluster/examples/kubernetes/ceph/pool.yaml +++ /dev/null @@ -1,67 +0,0 @@ -################################################################################################################# -# Create a Ceph pool with settings for replication in production environments. 
A minimum of 3 OSDs on -# different hosts are required in this example. -# kubectl create -f pool.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph # namespace:cluster -spec: - # The failure domain will spread the replicas of the data across different failure zones - failureDomain: host - # For a pool based on raw copies, specify the number of copies. A size of 1 indicates no redundancy. - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - # hybridStorage: - # primaryDeviceClass: ssd - # secondaryDeviceClass: hdd - # The number for replicas per failure domain, the value must be a divisor of the replica count. If specified, the most common value is 2 for stretch clusters, where the replica count would be 4. - # replicasPerFailureDomain: 2 - # The name of the failure domain to place further down replicas - # subFailureDomain: host - # Ceph CRUSH root location of the rule - # For reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets - #crushRoot: my-root - # The Ceph CRUSH device class associated with the CRUSH replicated rule - # For reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#device-classes - #deviceClass: my-class - # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false. - # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics - # enableRBDStats: true - # Set any property on a given pool - # see https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values - parameters: - # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - compression_mode: none - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #target_size_ratio: ".5" - mirroring: - enabled: false - # mirroring mode: pool level or per image - # for more details see: https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#enable-mirroring - mode: image - # specify the schedule(s) on which snapshots should be taken - # snapshotSchedules: - # - interval: 24h # daily snapshots - # startTime: 14:00:00-05:00 - # reports pool mirroring status if enabled - statusCheck: - mirror: - disabled: false - interval: 60s - # quota in bytes and/or objects, default value is 0 (unlimited) - # see https://docs.ceph.com/en/latest/rados/operations/pools/#set-pool-quotas - # quotas: - # maxSize: "10Gi" # valid suffixes include k, M, G, T, P, E, Ki, Mi, Gi, Ti, Pi, Ei - # maxObjects: 1000000000 # 1 billion objects - # A key/value list of annotations - annotations: - # key: value diff --git a/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml b/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml deleted file mode 100644 index 80a58ffe2..000000000 --- a/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml +++ /dev/null @@ -1,780 +0,0 @@ -################################################################################################################### -# Create the common resources 
that are necessary to start the operator and the ceph cluster. -# These resources *must* be created before the operator.yaml and cluster.yaml or their variants. -# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace. -# -# If the operator needs to manage multiple clusters (in different namespaces), see the section below -# for "cluster-specific resources". The resources below that section will need to be created for each namespace -# where the operator needs to manage the cluster. The resources above that section do not be created again. -# -# Most of the sections are prefixed with a 'OLM' keyword which is used to build our CSV for an OLM (Operator Life Cycle manager) -################################################################################################################### ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephclusters.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephCluster - listKind: CephClusterList - plural: cephclusters - singular: cephcluster - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - annotations: {} - cephVersion: - properties: - allowUnsupported: - type: boolean - image: - type: string - dashboard: - properties: - enabled: - type: boolean - urlPrefix: - type: string - port: - type: integer - minimum: 0 - maximum: 65535 - ssl: - type: boolean - dataDirHostPath: - pattern: ^/(\S+) - type: string - disruptionManagement: - properties: - machineDisruptionBudgetNamespace: - type: string - managePodBudgets: - type: boolean - osdMaintenanceTimeout: - type: integer - pgHealthCheckTimeout: - type: integer - manageMachineDisruptionBudgets: - type: boolean - skipUpgradeChecks: - type: boolean - continueUpgradeAfterChecksEvenIfNotHealthy: - type: boolean - waitTimeoutForHealthyOSDInMinutes: - type: integer - mon: - properties: - allowMultiplePerNode: - type: boolean - count: - maximum: 9 - minimum: 0 - type: integer - volumeClaimTemplate: {} - mgr: - properties: - count: - type: integer - minimum: 0 - maximum: 2 - modules: - items: - properties: - name: - type: string - enabled: - type: boolean - network: - properties: - hostNetwork: - type: boolean - provider: - type: string - selectors: {} - storage: - properties: - disruptionManagement: - properties: - machineDisruptionBudgetNamespace: - type: string - managePodBudgets: - type: boolean - osdMaintenanceTimeout: - type: integer - pgHealthCheckTimeout: - type: integer - manageMachineDisruptionBudgets: - type: boolean - useAllNodes: - type: boolean - nodes: - items: - properties: - name: - type: string - config: - properties: - metadataDevice: - type: string - storeType: - type: string - pattern: ^(bluestore)$ - databaseSizeMB: - type: string - walSizeMB: - type: string - journalSizeMB: - type: string - osdsPerDevice: - type: string - encryptedDevice: - type: string - pattern: ^(true|false)$ - useAllDevices: - type: boolean - deviceFilter: - type: string - devicePathFilter: - type: string - devices: - type: array - items: - properties: - name: - type: string - config: {} - resources: {} - useAllDevices: - type: boolean - deviceFilter: - type: string - devicePathFilter: - type: string - config: {} - storageClassDeviceSets: {} - monitoring: - properties: - enabled: - type: boolean - rulesNamespace: - type: string - externalMgrEndpoints: - type: array - items: - properties: - ip: - type: string - removeOSDsIfOutAndSafeToRemove: - type: 
boolean - external: - properties: - enable: - type: boolean - cleanupPolicy: - properties: - confirmation: - type: string - pattern: ^$|^yes-really-destroy-data$ - sanitizeDisks: - properties: - method: - type: string - pattern: ^(complete|quick)$ - dataSource: - type: string - pattern: ^(zero|random)$ - iteration: - type: integer - format: int32 - security: {} - logCollector: {} - placement: {} - resources: {} - healthCheck: {} - subresources: - status: {} - additionalPrinterColumns: - - name: DataDirHostPath - type: string - description: Directory used on the K8s nodes - JSONPath: .spec.dataDirHostPath - - name: MonCount - type: string - description: Number of MONs - JSONPath: .spec.mon.count - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - - name: Phase - type: string - description: Phase - JSONPath: .status.phase - - name: Message - type: string - description: Message - JSONPath: .status.message - - name: Health - type: string - description: Ceph Health - JSONPath: .status.ceph.health - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephclients.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephClient - listKind: CephClientList - plural: cephclients - singular: cephclient - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - caps: - type: object - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephrbdmirrors.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephRBDMirror - listKind: CephRBDMirrorList - plural: cephrbdmirrors - singular: cephrbdmirror - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - count: - type: integer - minimum: 1 - maximum: 100 - peers: - properties: - secretNames: - type: array - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephfilesystems.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystem - listKind: CephFilesystemList - plural: cephfilesystems - singular: cephfilesystem - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - metadataServer: - properties: - activeCount: - minimum: 1 - maximum: 10 - type: integer - activeStandby: - type: boolean - annotations: {} - placement: {} - resources: {} - metadataPool: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - minimum: 0 - maximum: 10 - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - minimum: 0 - maximum: 10 - type: integer - codingChunks: - minimum: 0 - maximum: 10 - type: integer - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - dataPools: - type: array - items: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - minimum: 0 - maximum: 10 - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - minimum: 0 - maximum: 10 - type: integer - codingChunks: - minimum: 0 - maximum: 10 - type: integer - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - parameters: - type: object - preservePoolsOnDelete: - type: boolean - preserveFilesystemOnDelete: - type: boolean - 
additionalPrinterColumns: - - name: ActiveMDS - type: string - description: Number of desired active MDS daemons - JSONPath: .spec.metadataServer.activeCount - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephnfses.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephNFS - listKind: CephNFSList - plural: cephnfses - singular: cephnfs - shortNames: - - nfs - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - rados: - properties: - pool: - type: string - namespace: - type: string - server: - properties: - active: - type: integer - annotations: {} - placement: {} - resources: {} - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectstores.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStore - listKind: CephObjectStoreList - plural: cephobjectstores - singular: cephobjectstore - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - gateway: - properties: - type: - type: string - sslCertificateRef: {} - port: - type: integer - minimum: 0 - maximum: 65535 - securePort: - type: integer - minimum: 0 - maximum: 65535 - instances: - type: integer - externalRgwEndpoints: - type: array - items: - properties: - ip: - type: string - annotations: {} - placement: {} - resources: {} - metadataPool: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - type: integer - codingChunks: - type: integer - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - parameters: - type: object - dataPool: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - type: integer - codingChunks: - type: integer - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - parameters: - type: object - preservePoolsOnDelete: - type: boolean - healthCheck: - properties: - bucket: - properties: - disabled: - type: boolean - interval: - type: string - timeout: - type: string - livenessProbe: - type: object - properties: - disabled: - type: boolean - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectstoreusers.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStoreUser - listKind: CephObjectStoreUserList - plural: cephobjectstoreusers - singular: cephobjectstoreuser - shortNames: - - rcou - - objectuser - scope: Namespaced - version: v1 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectrealms.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectRealm - listKind: CephObjectRealmList - plural: cephobjectrealms - singular: cephobjectrealm - scope: Namespaced - version: v1 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectzonegroups.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: 
CephObjectZoneGroup - listKind: CephObjectZoneGroupList - plural: cephobjectzonegroups - singular: cephobjectzonegroup - scope: Namespaced - version: v1 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectzones.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectZone - listKind: CephObjectZoneList - plural: cephobjectzones - singular: cephobjectzone - scope: Namespaced - version: v1 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephblockpools.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephBlockPool - listKind: CephBlockPoolList - plural: cephblockpools - singular: cephblockpool - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - failureDomain: - type: string - crushRoot: - type: string - replicated: - properties: - size: - type: integer - minimum: 0 - maximum: 9 - targetSizeRatio: - type: number - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - type: integer - minimum: 0 - maximum: 9 - codingChunks: - type: integer - minimum: 0 - maximum: 9 - compressionMode: - type: string - enum: - - "" - - none - - passive - - aggressive - - force - enableRBDStats: - description: EnableRBDStats is used to enable gathering of statistics - for all RBD images in the pool - type: boolean - parameters: - type: object - mirroring: - properties: - enabled: - type: boolean - mode: - type: string - enum: - - image - - pool - peers: - properties: - secretNames: - type: array - snapshotSchedules: - type: object - properties: - interval: - type: string - startTime: - type: string - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: volumes.rook.io -spec: - group: rook.io - names: - kind: Volume - listKind: VolumeList - plural: volumes - singular: volume - shortNames: - - rv - scope: Namespaced - version: v1alpha2 - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: objectbuckets.objectbucket.io -spec: - group: objectbucket.io - versions: - - name: v1alpha1 - served: true - storage: true - names: - kind: ObjectBucket - listKind: ObjectBucketList - plural: objectbuckets - singular: objectbucket - shortNames: - - ob - - obs - scope: Cluster - subresources: - status: {} - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: objectbucketclaims.objectbucket.io -spec: - versions: - - name: v1alpha1 - served: true - storage: true - group: objectbucket.io - names: - kind: ObjectBucketClaim - listKind: ObjectBucketClaimList - plural: objectbucketclaims - singular: objectbucketclaim - shortNames: - - obc - - obcs - scope: Namespaced - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephfilesystemmirrors.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystemMirror - listKind: CephFilesystemMirrorList - plural: cephfilesystemmirrors - singular: cephfilesystemmirror - scope: Namespaced - version: v1 - subresources: - status: {} diff --git a/cluster/examples/kubernetes/ceph/rbdmirror.yaml b/cluster/examples/kubernetes/ceph/rbdmirror.yaml deleted file mode 100644 index cef63175f..000000000 --- a/cluster/examples/kubernetes/ceph/rbdmirror.yaml +++ 
/dev/null @@ -1,45 +0,0 @@ -################################################################################################################# -# Create rbd-mirror daemon(s) -# kubectl create -f rbdmirror.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephRBDMirror -metadata: - name: my-rbd-mirror - namespace: rook-ceph # namespace:cluster -spec: - # the number of rbd-mirror daemons to deploy - count: 1 - # list of Kubernetes Secrets containing the peer token - # for more details see: https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#bootstrap-peers - #peers: - #secretNames: - #- secondary-cluster-peer - # The affinity rules to apply to the rbd-mirror deployment - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - rbd-mirror-node - # tolerations: - # - key: rbd-mirror-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # A key/value list of annotations - annotations: - # key: value - resources: - # The requests and limits, for example to allow the rbd-mirror pod(s) to use half of one CPU core and 1 gigabyte of memory - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # priorityClassName: my-priority-class diff --git a/cluster/examples/kubernetes/ceph/rgw-external.yaml b/cluster/examples/kubernetes/ceph/rgw-external.yaml deleted file mode 100644 index 8897c0fd6..000000000 --- a/cluster/examples/kubernetes/ceph/rgw-external.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: rook-ceph-rgw-my-store-external - namespace: rook-ceph # namespace:cluster - labels: - app: rook-ceph-rgw - rook_cluster: rook-ceph # namespace:cluster - rook_object_store: my-store -spec: - ports: - - name: rgw - port: 80 # service port mentioned in object store crd - protocol: TCP - targetPort: 8080 - selector: - app: rook-ceph-rgw - rook_cluster: rook-ceph # namespace:cluster - rook_object_store: my-store - sessionAffinity: None - type: NodePort diff --git a/cluster/examples/kubernetes/ceph/scc.yaml b/cluster/examples/kubernetes/ceph/scc.yaml deleted file mode 100644 index 88dc4883f..000000000 --- a/cluster/examples/kubernetes/ceph/scc.yaml +++ /dev/null @@ -1,81 +0,0 @@ -# scc for the Rook and Ceph daemons -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-ceph -allowPrivilegedContainer: true -allowHostNetwork: true -allowHostDirVolumePlugin: true -priority: -allowedCapabilities: [] -allowHostPorts: true -allowHostPID: true -allowHostIPC: true -readOnlyRootFilesystem: false -requiredDropCapabilities: [] -defaultAddCapabilities: [] -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -fsGroup: - type: MustRunAs -supplementalGroups: - type: RunAsAny -allowedFlexVolumes: - - driver: "ceph.rook.io/rook" - - driver: "ceph.rook.io/rook-ceph" -volumes: - - configMap - - downwardAPI - - emptyDir - - flexVolume - - hostPath - - persistentVolumeClaim - - projected - - secret -users: - # A user needs to be added for each rook service account. - # This assumes running in the default sample "rook-ceph" namespace. - # If other namespaces or service accounts are configured, they need to be updated here. 
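For illustration only (these namespaces are hypothetical and not part of the deleted manifest): if the operator ran in a namespace named "rook-op" and the cluster in "rook-cluster-a", the `users:` entries would follow the same pattern as the defaults listed next:

  users:
    # hypothetical namespaces, shown only to illustrate the pattern
    - system:serviceaccount:rook-op:rook-ceph-system          # serviceaccount:namespace:operator
    - system:serviceaccount:rook-cluster-a:default            # serviceaccount:namespace:cluster
    - system:serviceaccount:rook-cluster-a:rook-ceph-mgr      # serviceaccount:namespace:cluster
    - system:serviceaccount:rook-cluster-a:rook-ceph-osd      # serviceaccount:namespace:cluster

The default entries for the sample "rook-ceph" namespace were: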
- - system:serviceaccount:rook-ceph:rook-ceph-system # serviceaccount:namespace:operator - - system:serviceaccount:rook-ceph:default # serviceaccount:namespace:cluster - - system:serviceaccount:rook-ceph:rook-ceph-mgr # serviceaccount:namespace:cluster - - system:serviceaccount:rook-ceph:rook-ceph-osd # serviceaccount:namespace:cluster ---- -# scc for the CSI driver -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-ceph-csi -allowPrivilegedContainer: true -allowHostNetwork: true -allowHostDirVolumePlugin: true -priority: -allowedCapabilities: ['*'] -allowHostPorts: true -allowHostPID: true -allowHostIPC: true -readOnlyRootFilesystem: false -requiredDropCapabilities: [] -defaultAddCapabilities: [] -runAsUser: - type: RunAsAny -seLinuxContext: - type: RunAsAny -fsGroup: - type: RunAsAny -supplementalGroups: - type: RunAsAny -allowedFlexVolumes: - - driver: "ceph.rook.io/rook" - - driver: "ceph.rook.io/rook-ceph" -volumes: ['*'] -users: - # A user needs to be added for each rook service account. - # This assumes running in the default sample "rook-ceph" namespace. - # If other namespaces or service accounts are configured, they need to be updated here. - - system:serviceaccount:rook-ceph:rook-csi-rbd-plugin-sa # serviceaccount:namespace:operator - - system:serviceaccount:rook-ceph:rook-csi-rbd-provisioner-sa # serviceaccount:namespace:operator - - system:serviceaccount:rook-ceph:rook-csi-cephfs-plugin-sa # serviceaccount:namespace:operator - - system:serviceaccount:rook-ceph:rook-csi-cephfs-provisioner-sa # serviceaccount:namespace:operator diff --git a/cluster/examples/kubernetes/ceph/storageclass-bucket-delete.yaml b/cluster/examples/kubernetes/ceph/storageclass-bucket-delete.yaml deleted file mode 100644 index 63b25fabd..000000000 --- a/cluster/examples/kubernetes/ceph/storageclass-bucket-delete.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-delete-bucket -provisioner: rook-ceph.ceph.rook.io/bucket # driver:namespace:cluster -# set the reclaim policy to delete the bucket and all objects -# when its OBC is deleted. -reclaimPolicy: Delete -parameters: - objectStoreName: my-store - objectStoreNamespace: rook-ceph # namespace:cluster - region: us-east-1 - # To accommodate brownfield cases reference the existing bucket name here instead - # of in the ObjectBucketClaim (OBC). In this case the provisioner will grant - # access to the bucket by creating a new user, attaching it to the bucket, and - # providing the credentials via a Secret in the namespace of the requesting OBC. - #bucketName: diff --git a/cluster/examples/kubernetes/ceph/storageclass-bucket-retain.yaml b/cluster/examples/kubernetes/ceph/storageclass-bucket-retain.yaml deleted file mode 100644 index 37361a846..000000000 --- a/cluster/examples/kubernetes/ceph/storageclass-bucket-retain.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-retain-bucket -provisioner: rook-ceph.ceph.rook.io/bucket # driver:namespace:cluster -# set the reclaim policy to retain the bucket when its OBC is deleted -reclaimPolicy: Retain -parameters: - objectStoreName: my-store # port 80 assumed - objectStoreNamespace: rook-ceph # namespace:cluster - region: us-east-1 - # To accommodate brownfield cases reference the existing bucket name here instead - # of in the ObjectBucketClaim (OBC). 
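For reference (not part of the deleted manifests), a minimal greenfield ObjectBucketClaim consuming the rook-ceph-delete-bucket class above might look like the sketch below; the claim name, namespace, and bucket-name prefix are illustrative, and only storageClassName must match:

  apiVersion: objectbucket.io/v1alpha1
  kind: ObjectBucketClaim
  metadata:
    name: ceph-delete-bucket       # illustrative claim name
    namespace: default             # any application namespace
  spec:
    generateBucketName: ceph-bkt   # prefix for a provisioner-generated bucket name (greenfield)
    storageClassName: rook-ceph-delete-bucket
  # Once bound, the provisioner publishes the endpoint and credentials in a Secret and
  # ConfigMap named after the claim, in the claim's namespace.

For the brownfield case, bucketName is set on the StorageClass instead, as the surrounding comment describes.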
In this case the provisioner will grant - # access to the bucket by creating a new user, attaching it to the bucket, and - # providing the credentials via a Secret in the namespace of the requesting OBC. - #bucketName: diff --git a/cluster/examples/kubernetes/ceph/test-data/ceph-status-out b/cluster/examples/kubernetes/ceph/test-data/ceph-status-out deleted file mode 100644 index 705dd919d..000000000 --- a/cluster/examples/kubernetes/ceph/test-data/ceph-status-out +++ /dev/null @@ -1 +0,0 @@ -{"fsid":"33310efb-37a1-4e7b-a243-92faaa73e821","health":{"checks":{},"status":"HEALTH_OK"},"election_epoch":94,"quorum":[0,1,2],"quorum_names":["dell-r730-060","dell-r640-018","dell-r640-019"],"quorum_age":4698850,"monmap":{"epoch":1,"fsid":"33310efb-37a1-4e7b-a243-92faaa73e821","modified":"2020-12-09 15:37:23.577101","created":"2020-12-09 15:37:23.577101","min_mon_release":14,"min_mon_release_name":"nautilus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus"],"optional":[]},"mons":[{"rank":0,"name":"dell-r730-060","public_addrs":{"addrvec":[{"type":"v2","addr":"10.1.8.70:3300","nonce":0},{"type":"v1","addr":"10.1.8.70:6789","nonce":0}]},"addr":"10.1.8.70:6789/0","public_addr":"10.1.8.70:6789/0"},{"rank":1,"name":"dell-r640-018","public_addrs":{"addrvec":[{"type":"v2","addr":"10.1.8.109:3300","nonce":0},{"type":"v1","addr":"10.1.8.109:6789","nonce":0}]},"addr":"10.1.8.109:6789/0","public_addr":"10.1.8.109:6789/0"},{"rank":2,"name":"dell-r640-019","public_addrs":{"addrvec":[{"type":"v2","addr":"10.1.8.110:3300","nonce":0},{"type":"v1","addr":"10.1.8.110:6789","nonce":0}]},"addr":"10.1.8.110:6789/0","public_addr":"10.1.8.110:6789/0"}]},"osdmap":{"osdmap":{"epoch":932,"num_osds":6,"num_up_osds":6,"num_in_osds":6,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":328}],"num_pgs":328,"num_pools":11,"num_objects":31486,"data_bytes":112604804322,"bytes_used":5943038066688,"bytes_avail":3418874904576,"bytes_total":9361912971264,"write_bytes_sec":13642,"read_op_per_sec":0,"write_op_per_sec":1},"fsmap":{"epoch":383,"id":1,"up":1,"in":1,"max":1,"by_rank":[{"filesystem_id":1,"rank":0,"name":"dell-r730-035","status":"up:active","gid":257907}],"up:standby":1},"mgrmap":{"epoch":74,"active_gid":144154,"active_name":"dell-r640-018","active_addrs":{"addrvec":[{"type":"v2","addr":"10.1.8.109:6816","nonce":56},{"type":"v1","addr":"10.1.8.109:6817","nonce":56}]},"active_addr":"10.1.8.109:6817/56","active_change":"2020-12-18 17:25:57.961667","available":true,"standbys":[{"gid":156146,"name":"dell-r640-019","available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP 
server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"ansible","can_run":true,"error_string":"","module_options":{"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_url":{"name":"server_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"verify_server":{"name":"verify_server","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance 
utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"none","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_iterations":{"name":"upmap_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimization iterations","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"str","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_cephfs":{"name":"FEATURE_TOGGLE_cephfs","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_iscsi":{"name":"FEATURE_TOGGLE_iscsi","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_mirroring":{"name":"FEATURE_TOGGLE_mirroring","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_rbd":{"name":"FEATURE_TOGGLE_rbd","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_rgw":{"name":"FEATURE_TOGGLE_rgw","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"str","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_val
ue":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"str","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_HOST":{"name":"RGW_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_PORT":{"name":"RGW_API_PORT","type":"str","level":"advanced","flags":0,"default_value":"80","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SCHEME":{"name":"RGW_API_SCHEME","type":"str","level":"advanced","flags":0,"default_value":"http","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_USER_ID":{"name":"RGW_API_USER_ID","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"deepsea","can_run":true,"error_string":"","module_options":{"salt_api_eauth":{"name":"salt_api_eauth","type":"str","level":"advanced","flags":0,"default_value":"sharedsecret","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_password":{"name":"salt_api_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_url":{"name":"salt_api_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_username":{"name":"salt_api_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this 
long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not 
found","module_options":{"batch_size":{"name":"batch_size","type":"str","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"str","level":"advanced","flags":0,"default_value":"30","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"str","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"str","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{}},{"name":"iostat","can_run":true,"error_string":"","module_options":{}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration changes","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator_cli","can_run":true,"error_string":"","module_options":{"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"persist_interval":{"name":"persist_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how frequently to persist completed events","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{}},{"name":"restful","can_run":true,"error_string":"","module_options":{"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, 
version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{}},{"name":"volumes","can_run":true,"error_string":"","module_options":{}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","fla
gs":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]},{"gid":262092,"name":"dell-r730-060","available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"ansible","can_run":true,"error_string":"","module_options":{"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_url":{"name":"server_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"verify_server":{"name":"verify_server","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"none","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_iterations":{"name":"upmap_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimization iterations","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to 
retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"str","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_cephfs":{"name":"FEATURE_TOGGLE_cephfs","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_iscsi":{"name":"FEATURE_TOGGLE_iscsi","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_mirroring":{"name":"FEATURE_TOGGLE_mirroring","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_rbd":{"name":"FEATURE_TOGGLE_rbd","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_rgw":{"name":"FEATURE_TOGGLE_rgw","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"str","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name"
:"ISCSI_API_SSL_VERIFICATION","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"str","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_HOST":{"name":"RGW_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_PORT":{"name":"RGW_API_PORT","type":"str","level":"advanced","flags":0,"default_value":"80","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SCHEME":{"name":"RGW_API_SCHEME","type":"str","level":"advanced","flags":0,"default_value":"http","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_USER_ID":{"name":"RGW_API_USER_ID","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"deepsea","can_run":true,"error_string":"","module_options":{"salt_api_eauth":{"name":"salt_api_eauth","type":"str","level":"advanced","flags":0,"default_value":"sharedsecret","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_password":{"name":"salt_api_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_url":{"name":"salt_api_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_username":{"name":"salt_api_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this 
long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not 
found","module_options":{"batch_size":{"name":"batch_size","type":"str","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"str","level":"advanced","flags":0,"default_value":"30","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"str","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"str","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{}},{"name":"iostat","can_run":true,"error_string":"","module_options":{}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration changes","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator_cli","can_run":true,"error_string":"","module_options":{"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"persist_interval":{"name":"persist_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how frequently to persist completed events","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{}},{"name":"restful","can_run":true,"error_string":"","module_options":{"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, 
version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{}},{"name":"volumes","can_run":true,"error_string":"","module_options":{}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","fla
gs":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["dashboard","pg_autoscaler","prometheus"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"ansible","can_run":true,"error_string":"","module_options":{"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_url":{"name":"server_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"verify_server":{"name":"verify_server","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"none","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_iterations":{"name":"upmap_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimization iterations","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to 
retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"str","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_cephfs":{"name":"FEATURE_TOGGLE_cephfs","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_iscsi":{"name":"FEATURE_TOGGLE_iscsi","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_mirroring":{"name":"FEATURE_TOGGLE_mirroring","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_rbd":{"name":"FEATURE_TOGGLE_rbd","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_rgw":{"name":"FEATURE_TOGGLE_rgw","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"str","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name"
:"ISCSI_API_SSL_VERIFICATION","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"str","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_HOST":{"name":"RGW_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_PORT":{"name":"RGW_API_PORT","type":"str","level":"advanced","flags":0,"default_value":"80","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SCHEME":{"name":"RGW_API_SCHEME","type":"str","level":"advanced","flags":0,"default_value":"http","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"str","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_USER_ID":{"name":"RGW_API_USER_ID","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"deepsea","can_run":true,"error_string":"","module_options":{"salt_api_eauth":{"name":"salt_api_eauth","type":"str","level":"advanced","flags":0,"default_value":"sharedsecret","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_password":{"name":"salt_api_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_url":{"name":"salt_api_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"salt_api_username":{"name":"salt_api_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this 
long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not 
found","module_options":{"batch_size":{"name":"batch_size","type":"str","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"str","level":"advanced","flags":0,"default_value":"30","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"str","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"str","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{}},{"name":"iostat","can_run":true,"error_string":"","module_options":{}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration changes","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator_cli","can_run":true,"error_string":"","module_options":{"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"persist_interval":{"name":"persist_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how frequently to persist completed events","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{}},{"name":"restful","can_run":true,"error_string":"","module_options":{"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, 
version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{}},{"name":"volumes","can_run":true,"error_string":"","module_options":{}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"None","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","fla
gs":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"http://dell-r640-018.dsal.lab.eng.rdu2.redhat.com:8443/","prometheus":"http://dell-r640-018.dsal.lab.eng.rdu2.redhat.com:9283/"},"always_on_modules":{"nautilus":["balancer","crash","devicehealth","orchestrator_cli","progress","rbd_support","status","volumes"]}},"servicemap":{"epoch":11,"modified":"2020-12-18 18:23:42.587209","services":{"mds":{"daemons":{"summary":"","dell-r730-035":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":":/0","metadata":{},"task_status":{"scrub status":"idle"}}}},"rgw":{"daemons":{"summary":"","dell-r640-018.rgw0":{"start_epoch":6,"start_stamp":"2020-12-16 11:47:53.536060","gid":64270,"addr":"10.1.8.109:0/314962426","metadata":{"arch":"x86_64","ceph_release":"nautilus","ceph_version":"ceph version 14.2.8-115.el8cp (183dfafff0de1f79fccc983d82e733fedc0e988b) nautilus (stable)","ceph_version_short":"14.2.8-115.el8cp","container_image":"registry.redhat.io/rhceph/rhceph-4-rhel8:latest","cpu":"Intel(R) Xeon(R) Silver 4116 CPU @ 2.10GHz","distro":"rhel","distro_description":"Red Hat Enterprise Linux 8.3 (Ootpa)","distro_version":"8.3","frontend_config#0":"beast endpoint=10.1.8.109:8080","frontend_type#0":"beast","hostname":"dell-r640-018.dsal.lab.eng.rdu2.redhat.com","kernel_description":"#1 SMP Wed Oct 21 13:44:38 EDT 2020","kernel_version":"3.10.0-1160.6.1.el7.x86_64","mem_cgroup_limit":"4294967296","mem_swap_kb":"5242876","mem_total_kb":"196303104","num_handles":"1","os":"Linux","pid":"51","zone_id":"56193434-ef7c-4481-acd7-208fbdae72e7","zone_name":"default","zonegroup_id":"8acd0a96-6962-486a-9368-2d8577f27086","zonegroup_name":"default"},"task_status":{}},"dell-r640-019.rgw0":{"start_epoch":6,"start_stamp":"2020-12-16 11:47:53.535954","gid":64277,"addr":"10.1.8.110:0/132104653","metadata":{"arch":"x86_64","ceph_release":"nautilus","ceph_version":"ceph version 14.2.8-115.el8cp (183dfafff0de1f79fccc983d82e733fedc0e988b) nautilus (stable)","ceph_version_short":"14.2.8-115.el8cp","container_image":"registry.redhat.io/rhceph/rhceph-4-rhel8:latest","cpu":"Intel(R) Xeon(R) Silver 4116 CPU @ 2.10GHz","distro":"rhel","distro_description":"Red Hat Enterprise Linux 8.3 (Ootpa)","distro_version":"8.3","frontend_config#0":"beast endpoint=10.1.8.110:8080","frontend_type#0":"beast","hostname":"dell-r640-019.dsal.lab.eng.rdu2.redhat.com","kernel_description":"#1 SMP Wed Oct 21 13:44:38 EDT 2020","kernel_version":"3.10.0-1160.6.1.el7.x86_64","mem_cgroup_limit":"4294967296","mem_swap_kb":"5242876","mem_total_kb":"196303104","num_handles":"1","os":"Linux","pid":"51","zone_id":"56193434-ef7c-4481-acd7-208fbdae72e7","zone_name":"default","zonegroup_id":"8acd0a96-6962-486a-9368-2d8577f27086","zonegroup_name":"default"},"task_status":{}}}}}},"progress_events":{}} diff --git a/cluster/examples/kubernetes/ceph/toolbox-job.yaml b/cluster/examples/kubernetes/ceph/toolbox-job.yaml deleted file mode 100644 index dfd395db7..000000000 --- a/cluster/examples/kubernetes/ceph/toolbox-job.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: rook-ceph-toolbox-job - namespace: rook-ceph # namespace:cluster - labels: - app: ceph-toolbox-job -spec: - template: - spec: - initContainers: - - name: config-init - image: rook/ceph:v1.7.2 - command: ["/usr/local/bin/toolbox.sh"] - args: ["--skip-watch"] - imagePullPolicy: IfNotPresent - env: - - name: ROOK_CEPH_USERNAME - valueFrom: - secretKeyRef: - name: 
rook-ceph-mon - key: ceph-username - - name: ROOK_CEPH_SECRET - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-secret - volumeMounts: - - mountPath: /etc/ceph - name: ceph-config - - name: mon-endpoint-volume - mountPath: /etc/rook - containers: - - name: script - image: rook/ceph:v1.7.2 - volumeMounts: - - mountPath: /etc/ceph - name: ceph-config - readOnly: true - command: - - "bash" - - "-c" - - | - # Modify this script to run any ceph, rbd, radosgw-admin, or other commands that could - # be run in the toolbox pod. The output of the commands can be seen by getting the pod log. - # - # example: print the ceph status - ceph status - volumes: - - name: mon-endpoint-volume - configMap: - name: rook-ceph-mon-endpoints - items: - - key: data - path: mon-endpoints - - name: ceph-config - emptyDir: {} - restartPolicy: Never diff --git a/cluster/examples/kubernetes/ceph/toolbox.yaml b/cluster/examples/kubernetes/ceph/toolbox.yaml deleted file mode 100644 index 9fa9d3367..000000000 --- a/cluster/examples/kubernetes/ceph/toolbox.yaml +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-ceph-tools - namespace: rook-ceph # namespace:cluster - labels: - app: rook-ceph-tools -spec: - replicas: 1 - selector: - matchLabels: - app: rook-ceph-tools - template: - metadata: - labels: - app: rook-ceph-tools - spec: - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: rook-ceph-tools - image: rook/ceph:v1.7.2 - command: ["/tini"] - args: ["-g", "--", "/usr/local/bin/toolbox.sh"] - imagePullPolicy: IfNotPresent - env: - - name: ROOK_CEPH_USERNAME - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-username - - name: ROOK_CEPH_SECRET - valueFrom: - secretKeyRef: - name: rook-ceph-mon - key: ceph-secret - volumeMounts: - - mountPath: /etc/ceph - name: ceph-config - - name: mon-endpoint-volume - mountPath: /etc/rook - volumes: - - name: mon-endpoint-volume - configMap: - name: rook-ceph-mon-endpoints - items: - - key: data - path: mon-endpoints - - name: ceph-config - emptyDir: {} - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 5 diff --git a/cluster/examples/kubernetes/mysql.yaml b/cluster/examples/kubernetes/mysql.yaml deleted file mode 100644 index c3ac4ce1f..000000000 --- a/cluster/examples/kubernetes/mysql.yaml +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: wordpress-mysql - labels: - app: wordpress -spec: - ports: - - port: 3306 - selector: - app: wordpress - tier: mysql - clusterIP: None ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: mysql-pv-claim - labels: - app: wordpress -spec: - storageClassName: rook-ceph-block - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: wordpress-mysql - labels: - app: wordpress - tier: mysql -spec: - selector: - matchLabels: - app: wordpress - tier: mysql - strategy: - type: Recreate - template: - metadata: - labels: - app: wordpress - tier: mysql - spec: - containers: - - image: mysql:5.6 - name: mysql - env: - - name: MYSQL_ROOT_PASSWORD - value: changeme - ports: - - containerPort: 3306 - name: mysql - volumeMounts: - - name: mysql-persistent-storage - mountPath: /var/lib/mysql - volumes: - - name: mysql-persistent-storage - persistentVolumeClaim: - claimName: mysql-pv-claim diff --git a/cluster/examples/kubernetes/nfs/busybox-rc.yaml b/cluster/examples/kubernetes/nfs/busybox-rc.yaml 
deleted file mode 100644 index 4b5c8fc24..000000000 --- a/cluster/examples/kubernetes/nfs/busybox-rc.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: nfs-demo - role: busybox - name: nfs-busybox -spec: - replicas: 2 - selector: - matchLabels: - app: nfs-demo - role: busybox - template: - metadata: - labels: - app: nfs-demo - role: busybox - spec: - containers: - - image: busybox - command: - - sh - - -c - - "while true; do date > /mnt/index.html; hostname >> /mnt/index.html; sleep $(($RANDOM % 5 + 5)); done" - imagePullPolicy: IfNotPresent - name: busybox - volumeMounts: - # name must match the volume name below - - name: rook-nfs-vol - mountPath: "/mnt" - volumes: - - name: rook-nfs-vol - persistentVolumeClaim: - claimName: rook-nfs-pv-claim diff --git a/cluster/examples/kubernetes/nfs/crds.yaml b/cluster/examples/kubernetes/nfs/crds.yaml deleted file mode 100644 index f47ffe197..000000000 --- a/cluster/examples/kubernetes/nfs/crds.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: nfsservers.nfs.rook.io -spec: - group: nfs.rook.io - names: - kind: NFSServer - listKind: NFSServerList - plural: nfsservers - singular: nfsserver - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - - description: NFS Server instance state - jsonPath: .status.state - name: State - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: NFSServer is the Schema for the nfsservers API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: NFSServerSpec represents the spec of NFS daemon - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - type: object - exports: - description: The parameters to configure the NFS export - items: - description: ExportsSpec represents the spec of NFS exports - properties: - name: - description: Name of the export - type: string - persistentVolumeClaim: - description: PVC from which the NFS daemon gets storage for sharing - properties: - claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: Will force the ReadOnly setting in VolumeMounts. Default false. 
- type: boolean - required: - - claimName - type: object - server: - description: The NFS server configuration - properties: - accessMode: - description: Reading and Writing permissions on the export Valid values are "ReadOnly", "ReadWrite" and "none" - enum: - - ReadOnly - - ReadWrite - - none - type: string - allowedClients: - description: The clients allowed to access the NFS export - items: - description: AllowedClientsSpec represents the client specs for accessing the NFS export - properties: - accessMode: - description: Reading and Writing permissions for the client to access the NFS export Valid values are "ReadOnly", "ReadWrite" and "none" Gets overridden when ServerSpec.accessMode is specified - enum: - - ReadOnly - - ReadWrite - - none - type: string - clients: - description: The clients that can access the share Values can be hostname, ip address, netgroup, CIDR network address, or all - items: - type: string - type: array - name: - description: Name of the clients group - type: string - squash: - description: Squash options for clients Valid values are "none", "rootid", "root", and "all" Gets overridden when ServerSpec.squash is specified - enum: - - none - - rootid - - root - - all - type: string - type: object - type: array - squash: - description: This prevents the root users connected remotely from having root privileges Valid values are "none", "rootid", "root", and "all" - enum: - - none - - rootid - - root - - all - type: string - type: object - type: object - type: array - replicas: - description: Replicas of the NFS daemon - type: integer - type: object - status: - description: NFSServerStatus defines the observed state of NFSServer - properties: - message: - type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/cluster/examples/kubernetes/nfs/nfs-ceph.yaml b/cluster/examples/kubernetes/nfs/nfs-ceph.yaml deleted file mode 100644 index fbdc51dab..000000000 --- a/cluster/examples/kubernetes/nfs/nfs-ceph.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# A rook ceph cluster must be running -# Create a rook ceph cluster using examples in rook/cluster/examples/kubernetes/ceph -# Refer to https://rook.io/docs/rook/master/ceph-quickstart.html for a quick rook cluster setup -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-ceph-claim - namespace: rook-nfs -spec: - storageClassName: rook-ceph-block - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. - # Create a Ceph cluster for using this example - # Create a ceph PVC after creating the rook ceph cluster using ceph-pvc.yaml - persistentVolumeClaim: - claimName: nfs-ceph-claim - # A key/value list of annotations - annotations: - rook: nfs diff --git a/cluster/examples/kubernetes/nfs/nfs-xfs.yaml b/cluster/examples/kubernetes/nfs/nfs-xfs.yaml deleted file mode 100644 index 2a85ff032..000000000 --- a/cluster/examples/kubernetes/nfs/nfs-xfs.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -# A storage class with name standard-xfs must be present. -# The storage class must be has xfs filesystem type and prjquota mountOptions. 
-# This is example storage class for google compute engine pd -# --- -# apiVersion: storage.k8s.io/v1 -# kind: StorageClass -# metadata: -# name: standard-xfs -# parameters: -# type: pd-standard -# fsType: xfs -# mountOptions: -# - prjquota -# provisioner: kubernetes.io/gce-pd -# reclaimPolicy: Delete -# volumeBindingMode: Immediate -# allowVolumeExpansion: true -# -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-xfs-claim - namespace: rook-nfs -spec: - storageClassName: "standard-xfs" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. - persistentVolumeClaim: - claimName: nfs-xfs-claim - # A key/value list of annotations - annotations: - rook: nfs diff --git a/cluster/examples/kubernetes/nfs/nfs.yaml b/cluster/examples/kubernetes/nfs/nfs.yaml deleted file mode 100644 index 742fcf9de..000000000 --- a/cluster/examples/kubernetes/nfs/nfs.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -# A default storageclass must be present -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-default-claim - namespace: rook-nfs -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. - persistentVolumeClaim: - claimName: nfs-default-claim - # A key/value list of annotations - annotations: - rook: nfs diff --git a/cluster/examples/kubernetes/nfs/operator.yaml b/cluster/examples/kubernetes/nfs/operator.yaml deleted file mode 100644 index 1384d2afc..000000000 --- a/cluster/examples/kubernetes/nfs/operator.yaml +++ /dev/null @@ -1,136 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: rook-nfs-system # namespace:operator ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-operator - namespace: rook-nfs-system # namespace:operator ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-nfs-operator -subjects: - - kind: ServiceAccount - name: rook-nfs-operator - namespace: rook-nfs-system # namespace:operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-nfs-operator -rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - pods - verbs: - - list - - get - - watch - - create - - apiGroups: - - "" - resources: - - services - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - apps - resources: - - statefulsets - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - nfs.rook.io - resources: - - nfsservers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - nfs.rook.io - resources: - - nfsservers/status - - nfsservers/finalizers - verbs: - - get - - patch 
- - update ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-nfs-operator - namespace: rook-nfs-system # namespace:operator - labels: - app: rook-nfs-operator -spec: - replicas: 1 - selector: - matchLabels: - app: rook-nfs-operator - template: - metadata: - labels: - app: rook-nfs-operator - spec: - serviceAccountName: rook-nfs-operator - containers: - - name: rook-nfs-operator - image: rook/nfs:v1.7.2 - imagePullPolicy: IfNotPresent - args: ["nfs", "operator"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace diff --git a/cluster/examples/kubernetes/nfs/psp.yaml b/cluster/examples/kubernetes/nfs/psp.yaml deleted file mode 100644 index c61051110..000000000 --- a/cluster/examples/kubernetes/nfs/psp.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: rook-nfs-policy -spec: - privileged: true - fsGroup: - rule: RunAsAny - allowedCapabilities: - - DAC_READ_SEARCH - - SYS_RESOURCE - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - hostPath diff --git a/cluster/examples/kubernetes/nfs/pvc.yaml b/cluster/examples/kubernetes/nfs/pvc.yaml deleted file mode 100644 index 789de6b86..000000000 --- a/cluster/examples/kubernetes/nfs/pvc.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rook-nfs-pv-claim -spec: - storageClassName: "rook-nfs-share1" - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi diff --git a/cluster/examples/kubernetes/nfs/rbac.yaml b/cluster/examples/kubernetes/nfs/rbac.yaml deleted file mode 100644 index 3f1224d0f..000000000 --- a/cluster/examples/kubernetes/nfs/rbac.yaml +++ /dev/null @@ -1,60 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: rook-nfs ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-server - namespace: rook-nfs ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] - - apiGroups: [""] - resources: ["services", "endpoints"] - verbs: ["get"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - resourceNames: ["rook-nfs-policy"] - verbs: ["use"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: - - nfs.rook.io - resources: - - "*" - verbs: - - "*" ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -subjects: - - kind: ServiceAccount - name: - rook-nfs-server - # replace with namespace where provisioner is deployed - namespace: rook-nfs -roleRef: - kind: ClusterRole - name: rook-nfs-provisioner-runner - apiGroup: rbac.authorization.k8s.io diff --git a/cluster/examples/kubernetes/nfs/sc.yaml b/cluster/examples/kubernetes/nfs/sc.yaml deleted file mode 100644 index 2ad62ed6b..000000000 --- a/cluster/examples/kubernetes/nfs/sc.yaml +++ 
/dev/null @@ -1,13 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: rook-nfs-share1 -parameters: - exportName: share1 - nfsServerName: rook-nfs - nfsServerNamespace: rook-nfs -provisioner: nfs.rook.io/rook-nfs-provisioner -reclaimPolicy: Delete -volumeBindingMode: Immediate diff --git a/cluster/examples/kubernetes/nfs/scc.yaml b/cluster/examples/kubernetes/nfs/scc.yaml deleted file mode 100644 index 4c939ddcd..000000000 --- a/cluster/examples/kubernetes/nfs/scc.yaml +++ /dev/null @@ -1,36 +0,0 @@ -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-nfs -allowHostDirVolumePlugin: true -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegedContainer: false -allowedCapabilities: - - SYS_ADMIN - - DAC_READ_SEARCH -defaultAddCapabilities: null -fsGroup: - type: MustRunAs -priority: null -readOnlyRootFilesystem: false -requiredDropCapabilities: - - KILL - - MKNOD - - SYS_CHROOT -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret -users: - - system:serviceaccount:rook-nfs:rook-nfs-server diff --git a/cluster/examples/kubernetes/nfs/web-rc.yaml b/cluster/examples/kubernetes/nfs/web-rc.yaml deleted file mode 100644 index 92987c8c1..000000000 --- a/cluster/examples/kubernetes/nfs/web-rc.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: nfs-demo - role: web-frontend - name: nfs-web -spec: - replicas: 2 - selector: - matchLabels: - app: nfs-demo - role: web-frontend - template: - metadata: - labels: - app: nfs-demo - role: web-frontend - spec: - containers: - - name: web - image: nginx - ports: - - name: web - containerPort: 80 - volumeMounts: - # name must match the volume name below - - name: rook-nfs-vol - mountPath: "/usr/share/nginx/html" - volumes: - - name: rook-nfs-vol - persistentVolumeClaim: - claimName: rook-nfs-pv-claim diff --git a/cluster/examples/kubernetes/nfs/web-service.yaml b/cluster/examples/kubernetes/nfs/web-service.yaml deleted file mode 100644 index b73cac2bc..000000000 --- a/cluster/examples/kubernetes/nfs/web-service.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: nfs-web -spec: - ports: - - port: 80 - selector: - role: web-frontend diff --git a/cluster/examples/kubernetes/nfs/webhook.yaml b/cluster/examples/kubernetes/nfs/webhook.yaml deleted file mode 100644 index e544c6904..000000000 --- a/cluster/examples/kubernetes/nfs/webhook.yaml +++ /dev/null @@ -1,128 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system -rules: - - apiGroups: [""] - resources: ["secrets"] - resourceNames: - - "rook-nfs-webhook-cert" - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-nfs-webhook -subjects: - - apiGroup: "" - kind: ServiceAccount - name: rook-nfs-webhook - namespace: rook-nfs-system ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Certificate -metadata: - name: rook-nfs-webhook-cert - namespace: rook-nfs-system -spec: - dnsNames: - - 
rook-nfs-webhook.rook-nfs-system.svc - - rook-nfs-webhook.rook-nfs-system.svc.cluster.local - issuerRef: - kind: Issuer - name: rook-nfs-selfsigned-issuer - secretName: rook-nfs-webhook-cert ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Issuer -metadata: - name: rook-nfs-selfsigned-issuer - namespace: rook-nfs-system -spec: - selfSigned: {} ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - cert-manager.io/inject-ca-from: rook-nfs-system/rook-nfs-webhook-cert - creationTimestamp: null - name: rook-nfs-validating-webhook-configuration -webhooks: - - clientConfig: - caBundle: Cg== - service: - name: rook-nfs-webhook - namespace: rook-nfs-system - path: /validate-nfs-rook-io-v1alpha1-nfsserver - failurePolicy: Fail - name: validation.nfsserver.nfs.rook.io - rules: - - apiGroups: - - nfs.rook.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - nfsservers ---- -kind: Service -apiVersion: v1 -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system -spec: - selector: - app: rook-nfs-webhook - ports: - - port: 443 - targetPort: webhook-server ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system - labels: - app: rook-nfs-webhook -spec: - replicas: 1 - selector: - matchLabels: - app: rook-nfs-webhook - template: - metadata: - labels: - app: rook-nfs-webhook - spec: - containers: - - name: rook-nfs-webhook - image: rook/nfs:v1.7.2 - imagePullPolicy: IfNotPresent - args: ["nfs", "webhook"] - ports: - - containerPort: 9443 - name: webhook-server - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: rook-nfs-webhook-cert diff --git a/cluster/olm/ceph/README.md b/cluster/olm/ceph/README.md deleted file mode 100644 index 865b6e321..000000000 --- a/cluster/olm/ceph/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Build Rook's CSV file - -Just run `make CSV_VERSION=1.0.0 csv-ceph` like this: - -```console -make csv-ceph CSV_VERSION=1.0.1 CSV_PLATFORM=k8s ROOK_OP_VERSION=rook/ceph:v1.0.1 -``` - ->``` ->INFO[0000] Generating CSV manifest version 1.0.1 ->INFO[0000] Fill in the following required fields in file deploy/olm-catalog/ceph.csv.yaml: -> spec.keywords -> spec.maintainers -> spec.provider -> spec.labels ->INFO[0000] Create deploy/olm-catalog/ceph.csv.yaml ->INFO[0000] Create deploy/olm-catalog/_generated.concat_crd.yaml -> ->Congratulations! ->Your Rook CSV 1.0.1 file for k8s is ready at: cluster/olm/ceph/deploy/olm-catalog/rook-ceph.v1.0.1.clusterserviceversion.yaml ->Push it to https://github.com/operator-framework/community-operators as well as the CRDs files from cluster/olm/ceph/deploy/crds and the package file cluster/olm/ceph/assemble/rook-ceph.package.yaml. ->``` - -Or for OpenShift use: `make CSV_VERSION=1.0.0 CSV_PLATFORM=ocp csv-ceph`. diff --git a/cluster/olm/ceph/assemble/metadata-common.yaml b/cluster/olm/ceph/assemble/metadata-common.yaml deleted file mode 100644 index 48b4734e7..000000000 --- a/cluster/olm/ceph/assemble/metadata-common.yaml +++ /dev/null @@ -1,415 +0,0 @@ -spec: - replaces: rook-ceph.v1.1.1 - customresourcedefinitions: - owned: - - kind: CephCluster - name: cephclusters.ceph.rook.io - version: v1 - displayName: Ceph Cluster - description: Represents a Ceph cluster. 
- - kind: CephBlockPool - name: cephblockpools.ceph.rook.io - version: v1 - displayName: Ceph Block Pool - description: Represents a Ceph Block Pool. - - kind: CephObjectStore - name: cephobjectstores.ceph.rook.io - version: v1 - displayName: Ceph Object Store - description: Represents a Ceph Object Store. - specDescriptors: - - description: Coding Chunks - displayName: Coding Chunks - path: dataPool.erasureCoded.codingChunks - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:dataPool" - - "urn:alm:descriptor:com.tectonic.ui:number" - - description: Data Chunks - displayName: Data Chunks - path: dataPool.erasureCoded.dataChunks - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:dataPool" - - "urn:alm:descriptor:com.tectonic.ui:number" - - description: failureDomain - displayName: failureDomain - path: dataPool.failureDomain - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:dataPool" - - "urn:alm:descriptor:com.tectonic.ui:text" - - description: Size - displayName: Size - path: dataPool.replicated.size - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:dataPool" - - "urn:alm:descriptor:com.tectonic.ui:number" - - description: Annotations - displayName: Annotations - path: gateway.annotations - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway" - - "urn:alm:descriptor:io.kubernetes:annotations" - - description: Instances - displayName: Instances - path: gateway.instances - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway" - - "urn:alm:descriptor:com.tectonic.ui:number" - - description: Resources - displayName: Resources - path: gateway.resources - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway" - - "urn:alm:descriptor:com.tectonic.ui:resourceRequirements" - - description: placement - displayName: placement - path: gateway.placement - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway" - - "urn:alm:descriptor:io.kubernetes:placement" - - description: securePort - displayName: securePort - path: gateway.securePort - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway" - - "urn:alm:descriptor:io.kubernetes:securePort" - - description: sslCertificateRef - displayName: sslCertificateRef - path: gateway.sslCertificateRef - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway" - - "urn:alm:descriptor:io.kubernetes:sslCertificateRef" - - description: Type - displayName: Type - path: gateway.type - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway" - - "urn:alm:descriptor:com.tectonic.ui:text" - - description: Coding Chunks - displayName: Coding Chunks - path: metadataPool.erasureCoded.codingChunks - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:metadataPool" - - "urn:alm:descriptor:com.tectonic.ui:number" - - description: Data Chunks - displayName: Data Chunks - path: metadataPool.erasureCoded.dataChunks - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:metadataPool" - - "urn:alm:descriptor:com.tectonic.ui:number" - - description: failureDomain - displayName: failureDomain - path: metadataPool.failureDomain - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:metadataPool" - - "urn:alm:descriptor:com.tectonic.ui:text" - - description: Size - displayName: Size - path: metadataPool.replicated.size - x-descriptors: - - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:metadataPool" - - 
"urn:alm:descriptor:com.tectonic.ui:number" - - kind: CephObjectStoreUser - name: cephobjectstoreusers.ceph.rook.io - version: v1 - displayName: Ceph Object Store User - description: Represents a Ceph Object Store User. - - kind: CephNFS - name: cephnfses.ceph.rook.io - version: v1 - displayName: Ceph NFS - description: Represents a cluster of Ceph NFS ganesha gateways. - - kind: CephClient - name: cephclients.ceph.rook.io - version: v1 - displayName: Ceph Client - description: Represents a Ceph User. - - kind: CephFilesystem - name: cephfilesystems.ceph.rook.io - version: v1 - displayName: Ceph Filesystem - description: Represents a Ceph Filesystem. - - kind: CephFilesystemMirror - name: cephfilesystemmirrors.ceph.rook.io - version: v1 - displayName: Ceph Filesystem Mirror - description: Represents a Ceph Filesystem Mirror. - - kind: CephRBDMirror - name: cephrbdmirrors.ceph.rook.io - version: v1 - displayName: Ceph RBD Mirror - description: Represents a Ceph RBD Mirror. - - kind: CephObjectRealm - name: cephobjectrealms.ceph.rook.io - version: v1 - displayName: Ceph Object Store Realm - description: Represents a Ceph Object Store Realm. - - kind: CephObjectZoneGroup - name: cephobjectzonegroups.ceph.rook.io - version: v1 - displayName: Ceph Object Store Zone Group - description: Represents a Ceph Object Store Zone Group. - - kind: CephObjectZone - name: cephobjectzones.ceph.rook.io - version: v1 - displayName: Ceph Object Store Zone - description: Represents a Ceph Object Store Zone. - displayName: Rook-Ceph - description: | - - The Rook-Ceph storage operator packages, deploys, manages, upgrades and scales Ceph storage for providing persistent storage to infrastructure services (Logging, Metrics, Registry) as well as stateful applications in Kubernetes clusters. - - ## Rook-Ceph Storage Operator - - Rook runs as a cloud-native service in Kubernetes clusters for optimal integration with applications in need of storage, and handles the heavy-lifting behind the scenes such as provisioning and management. - Rook orchestrates battle-tested open-source storage technology Ceph, which has years of production deployments and runs some of the worlds largest clusters. - - Ceph is a massively scalable, software-defined, cloud native storage platform that offers block, file and object storage services. - Ceph can be used to back a wide variety of applications including relational databases, NoSQL databases, CI/CD tool-sets, messaging, AI/ML and analytics applications. - Ceph is a proven storage platform that backs some of the world's largest storage deployments and has a large vibrant open source community backing the project. - - ## Supported features - * **High Availability and resiliency** - Ceph has no single point of failures (SPOF) and all its components work natively in a highly available fashion - * **Data Protection** - Ceph periodically scrub for inconsistent objects and repair them if necessary, making sure your replicas are always coherent - * **Consistent storage platform across hybrid cloud** - Ceph can be deployed anywhere (on-premise or bare metal) and thus offers a similar experience regardless - * **Block, File & Object storage service** - Ceph can expose your data through several storage interfaces, solving all the application use cases - * **Scale up/down** - addition and removal of storage is fully covered by the operator. - * **Dashboard** - The Operator deploys a dashboard for monitoring and introspecting your cluster. 
- - ## Before you start - https://rook.io/docs/rook/v1.0/k8s-pre-reqs.html - - keywords: - [ - "rook", - "ceph", - "storage", - "object storage", - "open source", - "block storage", - "shared filesystem", - ] - minKubeVersion: 1.10.0 - labels: - alm-owner-etcd: rookoperator - operated-by: rookoperator - selector: - matchLabels: - alm-owner-etcd: rookoperator - operated-by: rookoperator - links: - - name: Blog - url: https://blog.rook.io - - name: Documentation - url: https://rook.github.io/docs/rook/v1.0/ - icon: - - base64data: PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4KPCEtLSBHZW5lcmF0b3I6IEFkb2JlIElsbHVzdHJhdG9yIDIzLjAuMiwgU1ZHIEV4cG9ydCBQbHVnLUluIC4gU1ZHIFZlcnNpb246IDYuMDAgQnVpbGQgMCkgIC0tPgo8c3ZnIHZlcnNpb249IjEuMSIgaWQ9IkxheWVyXzEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4IgoJIHZpZXdCb3g9IjAgMCA3MCA3MCIgc3R5bGU9ImVuYWJsZS1iYWNrZ3JvdW5kOm5ldyAwIDAgNzAgNzA7IiB4bWw6c3BhY2U9InByZXNlcnZlIj4KPHN0eWxlIHR5cGU9InRleHQvY3NzIj4KCS5zdDB7ZmlsbDojMkIyQjJCO30KPC9zdHlsZT4KPGc+Cgk8Zz4KCQk8Zz4KCQkJPHBhdGggY2xhc3M9InN0MCIgZD0iTTUwLjUsNjcuNkgxOS45Yy04LDAtMTQuNS02LjUtMTQuNS0xNC41VjI5LjJjMC0xLjEsMC45LTIuMSwyLjEtMi4xaDU1LjRjMS4xLDAsMi4xLDAuOSwyLjEsMi4xdjIzLjkKCQkJCUM2NSw2MS4xLDU4LjUsNjcuNiw1MC41LDY3LjZ6IE05LjYsMzEuMnYyMS45YzAsNS43LDQuNiwxMC4zLDEwLjMsMTAuM2gzMC42YzUuNywwLDEwLjMtNC42LDEwLjMtMTAuM1YzMS4ySDkuNnoiLz4KCQk8L2c+CgkJPGc+CgkJCTxwYXRoIGNsYXNzPSJzdDAiIGQ9Ik00Mi40LDU2LjdIMjhjLTEuMSwwLTIuMS0wLjktMi4xLTIuMXYtNy4yYzAtNS4xLDQuMi05LjMsOS4zLTkuM3M5LjMsNC4yLDkuMyw5LjN2Ny4yCgkJCQlDNDQuNSw1NS43LDQzLjYsNTYuNyw0Mi40LDU2Ljd6IE0zMCw1Mi41aDEwLjN2LTUuMmMwLTIuOS0yLjMtNS4yLTUuMi01LjJjLTIuOSwwLTUuMiwyLjMtNS4yLDUuMlY1Mi41eiIvPgoJCTwvZz4KCQk8Zz4KCQkJPHBhdGggY2xhc3M9InN0MCIgZD0iTTYyLjksMjMuMkM2Mi45LDIzLjIsNjIuOSwyMy4yLDYyLjksMjMuMmwtMTEuMSwwYy0xLjEsMC0yLjEtMC45LTIuMS0yLjFjMC0xLjEsMC45LTIuMSwyLjEtMi4xCgkJCQljMCwwLDAsMCwwLDBsOS4xLDBWNi43aC02Ljl2My41YzAsMC41LTAuMiwxLjEtMC42LDEuNWMtMC40LDAuNC0wLjksMC42LTEuNSwwLjZsMCwwbC0xMS4xLDBjLTEuMSwwLTIuMS0wLjktMi4xLTIuMVY2LjdoLTYuOQoJCQkJdjMuNWMwLDEuMS0wLjksMi4xLTIuMSwyLjFsLTExLjEsMGMtMC41LDAtMS4xLTAuMi0xLjUtMC42Yy0wLjQtMC40LTAuNi0wLjktMC42LTEuNVY2LjdIOS42djEyLjRoOWMxLjEsMCwyLjEsMC45LDIuMSwyLjEKCQkJCXMtMC45LDIuMS0yLjEsMi4xaC0xMWMtMS4xLDAtMi4xLTAuOS0yLjEtMi4xVjQuNmMwLTEuMSwwLjktMi4xLDIuMS0yLjFoMTEuMWMxLjEsMCwyLjEsMC45LDIuMSwyLjF2My41bDcsMFY0LjYKCQkJCWMwLTEuMSwwLjktMi4xLDIuMS0yLjFoMTEuMWMxLjEsMCwyLjEsMC45LDIuMSwyLjF2My41bDYuOSwwVjQuNmMwLTEuMSwwLjktMi4xLDIuMS0yLjFoMTEuMUM2NCwyLjYsNjUsMy41LDY1LDQuNnYxNi41CgkJCQljMCwwLjUtMC4yLDEuMS0wLjYsMS41QzY0LDIzLDYzLjQsMjMuMiw2Mi45LDIzLjJ6Ii8+CgkJPC9nPgoJPC9nPgo8L2c+Cjwvc3ZnPg== - mediatype: image/svg+xml - installModes: - - type: OwnNamespace - supported: true - - type: SingleNamespace - supported: true - - type: MultiNamespace - supported: false - - type: AllNamespaces - supported: false - -metadata: - annotations: - tectonic-visibility: ocs - repository: https://github.com/rook/rook - containerImage: rook/ceph:v1.2.2 - alm-examples: |- - [ - { - "apiVersion": "ceph.rook.io/v1", - "kind": "CephCluster", - "metadata": { - "name": "my-rook-ceph", - "namespace": "my-rook-ceph" - }, - "spec": { - "cephVersion": { - "image": "quay.io/ceph/ceph:v16.2.5" - }, - "dataDirHostPath": "/var/lib/rook", - "mon": { - "count": 3 - }, - "dashboard": { - "enabled": true - }, - "network": { - "hostNetwork": false - }, - "rbdMirroring": { - "workers": 0 - }, - "storage": { - "useAllNodes": true, - "useAllDevices": true - } - } - }, - { - "apiVersion": "ceph.rook.io/v1", - "kind": 
"CephBlockPool", - "metadata": { - "name": "replicapool", - "namespace": "my-rook-ceph" - }, - "spec": { - "failureDomain": "host", - "replicated": { - "size": 3 - }, - "annotations": null - } - }, - { - "apiVersion": "ceph.rook.io/v1", - "kind": "CephObjectStore", - "metadata": { - "name": "my-store", - "namespace": "my-rook-ceph" - }, - "spec": { - "metadataPool": { - "failureDomain": "host", - "replicated": { - "size": 3 - } - }, - "dataPool": { - "failureDomain": "host", - "replicated": { - "size": 3 - } - }, - "gateway": { - "type": "s3", - "sslCertificateRef": null, - "port": 8080, - "securePort": null, - "instances": 1, - "placement": null, - "annotations": null, - "resources": null - } - } - }, - { - "apiVersion": "ceph.rook.io/v1", - "kind": "CephObjectStoreUser", - "metadata": { - "name": "my-user", - "namespace": "my-rook-ceph" - }, - "spec": { - "store": "my-store", - "displayName": "my display name" - } - }, - { - "apiVersion": "ceph.rook.io/v1", - "kind": "CephNFS", - "metadata": { - "name": "my-nfs", - "namespace": "rook-ceph" - }, - "spec": { - "rados": { - "pool": "myfs-data0", - "namespace": "nfs-ns" - }, - "server": { - "active": 3, - "placement": null, - "annotations": null, - "resources": null - } - } - }, - { - "apiVersion": "ceph.rook.io/v1", - "kind": "CephClient", - "metadata": { - "name": "cinder", - "namespace": "rook-ceph" - }, - "spec": { - "caps": { - "mon": "profile rbd", - "osd": "profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images" - } - } - }, - { - "apiVersion": "ceph.rook.io/v1", - "kind": "CephFilesystem", - "metadata": { - "name": "myfs", - "namespace": "rook-ceph" - }, - "spec": { - "dataPools": [ - { - "compressionMode": "", - "crushRoot": "", - "deviceClass": "", - "erasureCoded": { - "algorithm": "", - "codingChunks": 0, - "dataChunks": 0 - }, - "failureDomain": "host", - "replicated": { - "requireSafeReplicaSize": false, - "size": 1, - "targetSizeRatio": 0.5 - } - } - ], - "metadataPool": { - "compressionMode": "", - "crushRoot": "", - "deviceClass": "", - "erasureCoded": { - "algorithm": "", - "codingChunks": 0, - "dataChunks": 0 - }, - "failureDomain": "", - "replicated": { - "requireSafeReplicaSize": false, - "size": 1, - "targetSizeRatio": 0 - } - }, - "metadataServer": { - "activeCount": 1, - "activeStandby": true, - "placement": {}, - "resources": {} - }, - "preservePoolsOnDelete": false, - "preserveFilesystemOnDelete": false - } - }, - { - "apiVersion": "ceph.rook.io/v1", - "kind": "CephRBDMirror", - "metadata": { - "name": "my-rbd-mirror", - "namespace": "rook-ceph" - }, - "spec": { - "annotations": null, - "count": 1, - "placement": { - "topologyKey": "kubernetes.io/hostname" - }, - "resources": null - } - } - ] diff --git a/cluster/olm/ceph/assemble/metadata-k8s.yaml b/cluster/olm/ceph/assemble/metadata-k8s.yaml deleted file mode 100644 index ca0da382b..000000000 --- a/cluster/olm/ceph/assemble/metadata-k8s.yaml +++ /dev/null @@ -1,14 +0,0 @@ -metadata: - annotations: - categories: Storage - description: Install and maintain Ceph Storage cluster - createdAt: 2019-05-13T18-08-04Z - support: https://slack.rook.io/ - certified: "false" - capabilities: Full Lifecycle -spec: - maintainers: - - name: The Rook Authors - email: cncf-rook-info@lists.cncf.io - provider: - name: The Rook Authors diff --git a/cluster/olm/ceph/assemble/metadata-ocp.yaml b/cluster/olm/ceph/assemble/metadata-ocp.yaml deleted file mode 100644 index 97472c923..000000000 --- a/cluster/olm/ceph/assemble/metadata-ocp.yaml +++ /dev/null @@ -1,19 
+0,0 @@ -spec: - install: - spec: - clusterPermissions: - - rules: - - verbs: - - use - apiGroups: - - security.openshift.io - resources: - - securitycontextconstraints - resourceNames: - - privileged - serviceAccountName: rook-ceph-system - maintainers: - - name: Red Hat, Inc. - email: customerservice@redhat.com - provider: - name: Red Hat, Inc. diff --git a/cluster/olm/ceph/assemble/metadata-okd.yaml b/cluster/olm/ceph/assemble/metadata-okd.yaml deleted file mode 100644 index 2516a1ebf..000000000 --- a/cluster/olm/ceph/assemble/metadata-okd.yaml +++ /dev/null @@ -1,27 +0,0 @@ -metadata: - annotations: - categories: Storage - description: Install and maintain Ceph Storage cluster - createdAt: 2019-05-13T18-08-04Z - support: https://slack.rook.io/ - certified: "false" - capabilities: Full Lifecycle -spec: - install: - spec: - clusterPermissions: - - rules: - - verbs: - - use - apiGroups: - - security.openshift.io - resources: - - securitycontextconstraints - resourceNames: - - privileged - serviceAccountName: rook-ceph-system - maintainers: - - name: The Rook Authors - email: cncf-rook-info@lists.cncf.io - provider: - name: The Rook Authors diff --git a/cluster/olm/ceph/assemble/objectbucket.io_objectbucketclaims.yaml b/cluster/olm/ceph/assemble/objectbucket.io_objectbucketclaims.yaml deleted file mode 100644 index 804e699ae..000000000 --- a/cluster/olm/ceph/assemble/objectbucket.io_objectbucketclaims.yaml +++ /dev/null @@ -1,44 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: objectbucketclaims.objectbucket.io -spec: - group: objectbucket.io - names: - kind: ObjectBucketClaim - listKind: ObjectBucketClaimList - plural: objectbucketclaims - singular: objectbucketclaim - shortNames: - - obc - - obcs - scope: Namespaced - versions: - - name: v1alpha1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - storageClassName: - type: string - bucketName: - type: string - generateBucketName: - type: string - additionalConfig: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - objectBucketName: - type: string - status: - type: object - x-kubernetes-preserve-unknown-fields: true - subresources: - status: {} diff --git a/cluster/olm/ceph/assemble/objectbucket.io_objectbuckets.yaml b/cluster/olm/ceph/assemble/objectbucket.io_objectbuckets.yaml deleted file mode 100644 index 60a166b7f..000000000 --- a/cluster/olm/ceph/assemble/objectbucket.io_objectbuckets.yaml +++ /dev/null @@ -1,69 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: objectbuckets.objectbucket.io -spec: - group: objectbucket.io - names: - kind: ObjectBucket - listKind: ObjectBucketList - plural: objectbuckets - singular: objectbucket - shortNames: - - ob - - obs - scope: Cluster - versions: - - name: v1alpha1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - storageClassName: - type: string - endpoint: - type: object - nullable: true - properties: - bucketHost: - type: string - bucketPort: - type: integer - format: int32 - bucketName: - type: string - region: - type: string - subRegion: - type: string - additionalConfig: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - authentication: - type: object - nullable: true - items: - type: object - x-kubernetes-preserve-unknown-fields: true - additionalState: - type: object - nullable: 
true - x-kubernetes-preserve-unknown-fields: true - reclaimPolicy: - type: string - claimRef: - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - type: object - x-kubernetes-preserve-unknown-fields: true - subresources: - status: {} diff --git a/cluster/olm/ceph/assemble/rook-ceph.package.yaml b/cluster/olm/ceph/assemble/rook-ceph.package.yaml deleted file mode 100644 index b0c145f4a..000000000 --- a/cluster/olm/ceph/assemble/rook-ceph.package.yaml +++ /dev/null @@ -1,4 +0,0 @@ -packageName: rook-ceph -channels: - - name: beta - currentCSV: rook-ceph.v1.2.2 diff --git a/cluster/olm/ceph/generate-rook-csv-templates.sh b/cluster/olm/ceph/generate-rook-csv-templates.sh deleted file mode 100755 index d6043cb2e..000000000 --- a/cluster/olm/ceph/generate-rook-csv-templates.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -set -e - -export OLM_SKIP_PKG_FILE_GEN="true" -export OLM_INCLUDE_CEPHFS_CSI="true" -export OLM_INCLUDE_RBD_CSI="true" -export OLM_INCLUDE_REPORTER="true" - -if [ -f "Dockerfile" ]; then - # if this is being executed from the images/ceph/ dir, - # back out to the source dir - cd ../../ -fi - -: "${OLM_CATALOG_DIR:=cluster/olm/ceph}" -DEPLOY_DIR="$OLM_CATALOG_DIR/deploy" -CRDS_DIR="$DEPLOY_DIR/crds" - -TEMPLATES_DIR="$OLM_CATALOG_DIR/templates" - -: "${SED_IN_PLACE:="build/sed-in-place"}" - -function generate_template() { - local provider=$1 - local csv_manifest_path="$DEPLOY_DIR/olm-catalog/${provider}/9999.9999.9999/manifests" - local tmp_csv_gen_file="$csv_manifest_path/ceph.clusterserviceversion.yaml" - local csv_template_file="$TEMPLATES_DIR/rook-ceph-${provider}.vVERSION.clusterserviceversion.yaml.in" - rm -rf $csv_manifest_path - - # v9999.9999.9999 is just a placeholder. operator-sdk requires valid semver here. - (cluster/olm/ceph/generate-rook-csv.sh "9999.9999.9999" $provider "{{.RookOperatorImage}}") - mv $tmp_csv_gen_file $csv_template_file - - # replace the placeholder with the templated value - $SED_IN_PLACE "s/9999.9999.9999/{{.RookOperatorCsvVersion}}/g" $csv_template_file - - echo "Template stored at $csv_template_file" -} - -# start clean -if [ -d $TEMPLATES_DIR ]; then - rm -rf $TEMPLATES_DIR -fi -mkdir -p $TEMPLATES_DIR - -generate_template "ocp" -generate_template "k8s" - -cp -R $CRDS_DIR $TEMPLATES_DIR/ diff --git a/cluster/olm/ceph/generate-rook-csv.sh b/cluster/olm/ceph/generate-rook-csv.sh deleted file mode 100755 index 263d4399c..000000000 --- a/cluster/olm/ceph/generate-rook-csv.sh +++ /dev/null @@ -1,278 +0,0 @@ -#!/usr/bin/env bash -set -e - -################## -# INIT VARIABLES # -################## -: "${OLM_CATALOG_DIR:=cluster/olm/ceph}" -ASSEMBLE_FILE_COMMON="$OLM_CATALOG_DIR/assemble/metadata-common.yaml" -ASSEMBLE_FILE_K8S="$OLM_CATALOG_DIR/assemble/metadata-k8s.yaml" -ASSEMBLE_FILE_OCP="$OLM_CATALOG_DIR/assemble/metadata-ocp.yaml" -ASSEMBLE_FILE_OKD="$OLM_CATALOG_DIR/assemble/metadata-okd.yaml" -PACKAGE_FILE="$OLM_CATALOG_DIR/assemble/rook-ceph.package.yaml" -SUPPORTED_PLATFORMS='k8s|ocp|okd' - -operator_sdk="${OPERATOR_SDK:-operator-sdk}" -yq="${YQ:-yq}" - -# Default CSI to true -: "${OLM_INCLUDE_CEPHFS_CSI:=true}" -: "${OLM_INCLUDE_RBD_CSI:=true}" -: "${OLM_INCLUDE_REPORTER:=true}" - -########## -# CHECKS # -########## -if ! command -v "$operator_sdk" >/dev/null && [ ! -f "$operator_sdk" ]; then - echo "operator-sdk is not installed $operator_sdk" - echo "follow instructions here: https://github.com/operator-framework/operator-sdk/#quick-start" - exit 1 -fi - -if ! command -v "$yq" >/dev/null && [ ! 
-f "$yq" ]; then - echo "yq is not installed" - echo "follow instructions here: https://github.com/mikefarah/yq#install" - exit 1 -fi - -if [[ -z "$1" ]]; then - echo "Please provide a version, e.g:" - echo "" - echo "ARGUMENT'S ORDER MATTERS" - echo "" - echo "make csv-ceph CSV_VERSION=1.0.1 CSV_PLATFORM=k8s ROOK_OP_VERSION=rook/ceph:v1.0.1" - exit 1 -fi -VERSION=$1 - -if [[ -z $2 ]]; then - echo "Please provide a platform, choose one of these: $SUPPORTED_PLATFORMS, e.g:" - echo "" - echo "ARGUMENT'S ORDER MATTERS" - echo "" - echo "make csv-ceph CSV_VERSION=1.0.1 CSV_PLATFORM=k8s ROOK_OP_VERSION=rook/ceph:v1.0.1" - exit 1 -fi - -if [[ -n $2 ]]; then - if [[ ! $2 =~ $SUPPORTED_PLATFORMS ]]; then - echo "Platform $2 is not supported" - echo "Please choose one of these: $SUPPORTED_PLATFORMS" - exit 1 - fi - PLATFORM=$2 -fi - -if [[ -z $3 ]]; then - echo "Please provide an operator version, e.g:" - echo "" - echo "ARGUMENT'S ORDER MATTERS" - echo "" - echo "make csv-ceph CSV_VERSION=1.0.1 CSV_PLATFORM=k8s ROOK_OP_VERSION=rook/ceph:v1.0.1" - exit 1 -fi -ROOK_OP_VERSION=$3 - -############# -# VARIABLES # -############# -: "${SED_IN_PLACE:="build/sed-in-place"}" -YQ_CMD_DELETE=($yq delete -i) -YQ_CMD_MERGE_OVERWRITE=($yq merge --inplace --overwrite --prettyPrint) -YQ_CMD_MERGE=($yq merge --inplace --append -P ) -YQ_CMD_WRITE=($yq write --inplace -P ) -OPERATOR_YAML_FILE_K8S="cluster/examples/kubernetes/ceph/operator.yaml" -OPERATOR_YAML_FILE_OCP="cluster/examples/kubernetes/ceph/operator-openshift.yaml" -COMMON_YAML_FILE="cluster/examples/kubernetes/ceph/common.yaml" -CSV_PATH="$OLM_CATALOG_DIR/deploy/olm-catalog/${PLATFORM}/${VERSION}" -CSV_BUNDLE_PATH="${CSV_PATH}/manifests" -CSV_FILE_NAME="$CSV_BUNDLE_PATH/ceph.clusterserviceversion.yaml" -OP_SDK_CMD=($operator_sdk generate csv --output-dir="deploy/olm-catalog/${PLATFORM}/${VERSION}" --csv-version) -OLM_OPERATOR_YAML_FILE="$OLM_CATALOG_DIR/deploy/operator.yaml" -OLM_ROLE_YAML_FILE="$OLM_CATALOG_DIR/deploy/role.yaml" -OLM_ROLE_BINDING_YAML_FILE="$OLM_CATALOG_DIR/deploy/role_binding.yaml" -OLM_SERVICE_ACCOUNT_YAML_FILE="$OLM_CATALOG_DIR/deploy/service_account.yaml" -CEPH_EXTERNAL_SCRIPT_FILE="cluster/examples/kubernetes/ceph/create-external-cluster-resources.py" - -if [[ -d "$CSV_BUNDLE_PATH" ]]; then - echo "$CSV_BUNDLE_PATH already exists, not doing anything." 
- exit 0 -fi - -############# -# FUNCTIONS # -############# -function create_directories(){ - mkdir -p "$CSV_PATH" - mkdir -p "$OLM_CATALOG_DIR/deploy/crds" -} - -function cleanup() { - "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" metadata.creationTimestamp - "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" 'spec.install.spec.deployments[0].spec.template.metadata.creationTimestamp' -} - -function generate_csv(){ - pushd "$OLM_CATALOG_DIR" &> /dev/null - "${OP_SDK_CMD[@]}" "$VERSION" - popd &> /dev/null - - # cleanup to get the expected state before merging the real data from assembles - "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" 'spec.icon[*]' - "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" 'spec.installModes[*]' - "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" 'spec.keywords[0]' - "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" 'spec.maintainers[0]' - - "${YQ_CMD_MERGE_OVERWRITE[@]}" "$CSV_FILE_NAME" "$ASSEMBLE_FILE_COMMON" - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" metadata.annotations.externalClusterScript "$(base64 <$CEPH_EXTERNAL_SCRIPT_FILE)" - - if [[ "$PLATFORM" == "k8s" ]]; then - "${YQ_CMD_MERGE_OVERWRITE[@]}" "$CSV_FILE_NAME" "$ASSEMBLE_FILE_K8S" - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" metadata.name "rook-ceph.v${VERSION}" - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" spec.displayName "Rook-Ceph" - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" metadata.annotations.createdAt "$(date +"%Y-%m-%dT%H-%M-%SZ")" - fi - - if [[ "$PLATFORM" == "ocp" ]]; then - "${YQ_CMD_MERGE[@]}" "$CSV_FILE_NAME" "$ASSEMBLE_FILE_OCP" - fi - - if [[ "$PLATFORM" == "okd" ]]; then - "${YQ_CMD_MERGE[@]}" "$CSV_FILE_NAME" "$ASSEMBLE_FILE_OKD" - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" metadata.name "rook-ceph.v${VERSION}" - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" spec.displayName "Rook-Ceph" - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" metadata.annotations.createdAt "$(date +"%Y-%m-%dT%H-%M-%SZ")" - fi -} - -function generate_operator_yaml() { - platform=$2 - operator_file=$OPERATOR_YAML_FILE_K8S - if [[ "$platform" == "ocp" ]]; then - operator_file=$OPERATOR_YAML_FILE_OCP - fi - if [[ "$platform" == "okd" ]]; then - operator_file=$OPERATOR_YAML_FILE_OCP - fi - - sed -n '/^# OLM: BEGIN OPERATOR DEPLOYMENT$/,/# OLM: END OPERATOR DEPLOYMENT$/p' "$operator_file" > "$OLM_OPERATOR_YAML_FILE" -} - -function generate_role_yaml() { - sed -n '/^# OLM: BEGIN OPERATOR ROLE$/,/# OLM: END OPERATOR ROLE$/p' "$COMMON_YAML_FILE" > "$OLM_ROLE_YAML_FILE" - sed -n '/^# OLM: BEGIN CLUSTER ROLE$/,/# OLM: END CLUSTER ROLE$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_YAML_FILE" - - if [ "$OLM_INCLUDE_CEPHFS_CSI" = true ]; then - sed -n '/^# OLM: BEGIN CSI CEPHFS ROLE$/,/# OLM: END CSI CEPHFS ROLE$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_YAML_FILE" - sed -n '/^# OLM: BEGIN CSI CEPHFS CLUSTER ROLE$/,/# OLM: END CSI CEPHFS CLUSTER ROLE$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_YAML_FILE" - fi - if [ "$OLM_INCLUDE_RBD_CSI" = true ]; then - sed -n '/^# OLM: BEGIN CSI RBD ROLE$/,/# OLM: END CSI RBD ROLE$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_YAML_FILE" - sed -n '/^# OLM: BEGIN CSI RBD CLUSTER ROLE$/,/# OLM: END CSI RBD CLUSTER ROLE$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_YAML_FILE" - fi - if [ "$OLM_INCLUDE_REPORTER" = true ] ; then - sed -n '/^# OLM: BEGIN CMD REPORTER ROLE$/,/# OLM: END CMD REPORTER ROLE$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_YAML_FILE" - fi -} - -function generate_role_binding_yaml() { - sed -n '/^# OLM: BEGIN OPERATOR ROLEBINDING$/,/# OLM: END OPERATOR ROLEBINDING$/p' "$COMMON_YAML_FILE" > "$OLM_ROLE_BINDING_YAML_FILE" - sed -n '/^# OLM: BEGIN CLUSTER ROLEBINDING$/,/# OLM: END CLUSTER ROLEBINDING$/p' 
"$COMMON_YAML_FILE" >> "$OLM_ROLE_BINDING_YAML_FILE" - if [ "$OLM_INCLUDE_CEPHFS_CSI" = true ]; then - sed -n '/^# OLM: BEGIN CSI CEPHFS ROLEBINDING$/,/# OLM: END CSI CEPHFS ROLEBINDING$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_BINDING_YAML_FILE" - sed -n '/^# OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING$/,/# OLM: END CSI CEPHFS CLUSTER ROLEBINDING$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_BINDING_YAML_FILE" - fi - if [ "$OLM_INCLUDE_RBD_CSI" = true ]; then - sed -n '/^# OLM: BEGIN CSI RBD ROLEBINDING$/,/# OLM: END CSI RBD ROLEBINDING$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_BINDING_YAML_FILE" - sed -n '/^# OLM: BEGIN CSI RBD CLUSTER ROLEBINDING$/,/# OLM: END CSI RBD CLUSTER ROLEBINDING$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_BINDING_YAML_FILE" - fi - if [ "$OLM_INCLUDE_REPORTER" = true ] ; then - sed -n '/^# OLM: BEGIN CMD REPORTER ROLEBINDING$/,/# OLM: END CMD REPORTER ROLEBINDING$/p' "$COMMON_YAML_FILE" >> "$OLM_ROLE_BINDING_YAML_FILE" - fi -} - -function generate_service_account_yaml() { - sed -n '/^# OLM: BEGIN SERVICE ACCOUNT SYSTEM$/,/# OLM: END SERVICE ACCOUNT SYSTEM$/p' "$COMMON_YAML_FILE" > "$OLM_SERVICE_ACCOUNT_YAML_FILE" - sed -n '/^# OLM: BEGIN SERVICE ACCOUNT OSD$/,/# OLM: END SERVICE ACCOUNT OSD$/p' "$COMMON_YAML_FILE" >> "$OLM_SERVICE_ACCOUNT_YAML_FILE" - sed -n '/^# OLM: BEGIN SERVICE ACCOUNT MGR$/,/# OLM: END SERVICE ACCOUNT MGR$/p' "$COMMON_YAML_FILE" >> "$OLM_SERVICE_ACCOUNT_YAML_FILE" - if [ "$OLM_INCLUDE_CEPHFS_CSI" = true ]; then - sed -n '/^# OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT$/,/# OLM: END CSI CEPHFS SERVICE ACCOUNT$/p' "$COMMON_YAML_FILE" >> "$OLM_SERVICE_ACCOUNT_YAML_FILE" - fi - if [ "$OLM_INCLUDE_RBD_CSI" = true ]; then - sed -n '/^# OLM: BEGIN CSI RBD SERVICE ACCOUNT$/,/# OLM: END CSI RBD SERVICE ACCOUNT$/p' "$COMMON_YAML_FILE" >> "$OLM_SERVICE_ACCOUNT_YAML_FILE" - fi - if [ "$OLM_INCLUDE_REPORTER" = true ] ; then - sed -n '/^# OLM: BEGIN CMD REPORTER SERVICE ACCOUNT$/,/# OLM: END CMD REPORTER SERVICE ACCOUNT$/p' "$COMMON_YAML_FILE" >> "$OLM_SERVICE_ACCOUNT_YAML_FILE" - fi -} - -function hack_csv() { - # Let's respect the following mapping - # somehow the operator-sdk command generates serviceAccountNames suffixed with '-rules' - # instead of the service account name - # So that function fixes that - - # rook-ceph-system --> serviceAccountName - # rook-ceph-cluster-mgmt --> rule - # rook-ceph-system - # rook-ceph-global - - # rook-ceph-mgr --> serviceAccountName - # rook-ceph-mgr --> rule - # rook-ceph-mgr-system --> rule - # rook-ceph-mgr-cluster - - # rook-ceph-osd --> serviceAccountName - # rook-ceph-osd --> rule - - $SED_IN_PLACE 's/rook-ceph-global/rook-ceph-system/' "$CSV_FILE_NAME" - $SED_IN_PLACE 's/rook-ceph-object-bucket/rook-ceph-system/' "$CSV_FILE_NAME" - $SED_IN_PLACE 's/rook-ceph-cluster-mgmt/rook-ceph-system/' "$CSV_FILE_NAME" - - $SED_IN_PLACE 's/rook-ceph-mgr-cluster/rook-ceph-mgr/' "$CSV_FILE_NAME" - $SED_IN_PLACE 's/rook-ceph-mgr-system/rook-ceph-mgr/' "$CSV_FILE_NAME" - - $SED_IN_PLACE 's/cephfs-csi-nodeplugin/rook-csi-cephfs-plugin-sa/' "$CSV_FILE_NAME" - $SED_IN_PLACE 's/cephfs-external-provisioner-runner/rook-csi-cephfs-provisioner-sa/' "$CSV_FILE_NAME" - - $SED_IN_PLACE 's/rbd-csi-nodeplugin/rook-csi-rbd-plugin-sa/' "$CSV_FILE_NAME" - $SED_IN_PLACE 's/rbd-external-provisioner-runner/rook-csi-rbd-provisioner-sa/' "$CSV_FILE_NAME" - # The operator-sdk also does not properly respect when - # Roles differ from the Service Account name - # The operator-sdk instead assumes the Role/ClusterRole is the ServiceAccount name - # - # To account for these 
mappings, we have to replace Role/ClusterRole names with - # the corresponding ServiceAccount. - $SED_IN_PLACE 's/cephfs-external-provisioner-cfg/rook-csi-cephfs-provisioner-sa/' "$CSV_FILE_NAME" - $SED_IN_PLACE 's/rbd-external-provisioner-cfg/rook-csi-rbd-provisioner-sa/' "$CSV_FILE_NAME" -} - -function generate_package() { - "${YQ_CMD_WRITE[@]}" "$PACKAGE_FILE" channels[0].currentCSV "rook-ceph.v${VERSION}" -} - -function apply_rook_op_img(){ - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" metadata.annotations.containerImage "$ROOK_OP_VERSION" - "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" spec.install.spec.deployments[0].spec.template.spec.containers[0].image "$ROOK_OP_VERSION" -} - -######## -# MAIN # -######## -create_directories -generate_operator_yaml "$@" -generate_role_yaml -generate_role_binding_yaml -generate_service_account_yaml -generate_csv "$@" -hack_csv -if [ -z "${OLM_SKIP_PKG_FILE_GEN}" ]; then - generate_package -fi -apply_rook_op_img -cleanup - -echo "" -echo "Congratulations!" -echo "Your Rook CSV $VERSION manifest for $PLATFORM is ready at: $CSV_BUNDLE_PATH" -echo "Push it to https://github.com/operator-framework/community-operators as well as the CRDs from the same folder and the package file $PACKAGE_FILE." diff --git a/cmd/rook/cassandra/cassandra.go b/cmd/rook/cassandra/cassandra.go index 5ce24ac0a..1d4a32dea 100644 --- a/cmd/rook/cassandra/cassandra.go +++ b/cmd/rook/cassandra/cassandra.go @@ -28,7 +28,7 @@ var Cmd = &cobra.Command{ } var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "cassandracmd") + logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "cassandracmd") ) func init() { diff --git a/cmd/rook/cassandra/operator.go b/cmd/rook/cassandra/operator.go index f890ed5f2..834355aef 100644 --- a/cmd/rook/cassandra/operator.go +++ b/cmd/rook/cassandra/operator.go @@ -20,11 +20,11 @@ import ( "fmt" "time" - "github.com/rook/rook/cmd/rook/rook" - rookinformers "github.com/rook/rook/pkg/client/informers/externalversions" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller" - "github.com/rook/rook/pkg/util/flags" + "github.com/rook/cassandra/cmd/rook/rook" + rookinformers "github.com/rook/cassandra/pkg/client/informers/externalversions" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" + "github.com/rook/cassandra/pkg/operator/cassandra/controller" + "github.com/rook/cassandra/pkg/util/flags" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/server" @@ -37,7 +37,7 @@ var operatorCmd = &cobra.Command{ Use: "operator", Short: "Runs the cassandra operator to deploy and manage cassandra in Kubernetes", Long: `Runs the cassandra operator to deploy and manage cassandra in kubernetes clusters. 
-https://github.com/rook/rook`, +https://github.com/rook/cassandra`, } func init() { diff --git a/cmd/rook/cassandra/sidecar.go b/cmd/rook/cassandra/sidecar.go index 742e694c2..0da74c5e4 100644 --- a/cmd/rook/cassandra/sidecar.go +++ b/cmd/rook/cassandra/sidecar.go @@ -20,10 +20,10 @@ import ( "fmt" "os" - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/operator/cassandra/sidecar" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/flags" + "github.com/rook/cassandra/cmd/rook/rook" + "github.com/rook/cassandra/pkg/operator/cassandra/sidecar" + "github.com/rook/cassandra/pkg/operator/k8sutil" + "github.com/rook/cassandra/pkg/util/flags" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/server" @@ -35,7 +35,7 @@ var sidecarCmd = &cobra.Command{ Use: "sidecar", Short: "Runs the cassandra sidecar to deploy and manage cassandra in Kubernetes", Long: `Runs the cassandra sidecar to deploy and manage cassandra in kubernetes clusters. -https://github.com/rook/rook`, +https://github.com/rook/cassandra`, } func init() { diff --git a/cmd/rook/ceph/admission.go b/cmd/rook/ceph/admission.go deleted file mode 100644 index 8f7945e4e..000000000 --- a/cmd/rook/ceph/admission.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ceph - -import ( - "github.com/rook/rook/cmd/rook/rook" - operator "github.com/rook/rook/pkg/operator/ceph" - "github.com/spf13/cobra" -) - -var ( - admissionCmd = &cobra.Command{ - Use: "admission-controller", - Short: "Starts admission controller", - } -) - -func init() { - admissionCmd.Run = startAdmissionController -} - -func startAdmissionController(cmd *cobra.Command, args []string) { - rook.SetLogLevel() - rook.LogStartupInfo(admissionCmd.Flags()) - err := operator.StartAdmissionController() - if err != nil { - rook.TerminateFatal(err) - } -} diff --git a/cmd/rook/ceph/agent.go b/cmd/rook/ceph/agent.go deleted file mode 100644 index e53b78da9..000000000 --- a/cmd/rook/ceph/agent.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ceph - -import ( - "github.com/pkg/errors" - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/daemon/ceph/agent" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var agentCmd = &cobra.Command{ - Use: "agent", - Short: "Runs the rook ceph agent", -} - -func init() { - flags.SetFlagsFromEnv(agentCmd.Flags(), rook.RookEnvVarPrefix) - agentCmd.RunE = startAgent -} - -func startAgent(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - - rook.LogStartupInfo(agentCmd.Flags()) - - logger.Infof("starting rook ceph agent") - - context := rook.NewContext() - agent := agent.New(context) - err := agent.Run() - if err != nil { - rook.TerminateFatal(errors.Wrapf(err, "failed to run rook ceph agent\n")) - } - - return nil -} diff --git a/cmd/rook/ceph/ceph.go b/cmd/rook/ceph/ceph.go deleted file mode 100644 index 8b099ad1d..000000000 --- a/cmd/rook/ceph/ceph.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ceph - -import ( - "os" - - "github.com/coreos/pkg/capnslog" - "github.com/spf13/cobra" - - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - osdconfig "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - "github.com/rook/rook/pkg/operator/k8sutil" -) - -// Cmd is the main command for operator and daemons. 
-var Cmd = &cobra.Command{ - Use: "ceph", - Short: "Main command for Ceph operator and daemons.", -} - -var ( - cfg = &config{} - clusterInfo cephclient.ClusterInfo - logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephcmd") -) - -type config struct { - devices string - metadataDevice string - dataDir string - forceFormat bool - location string - cephConfigOverride string - storeConfig osdconfig.StoreConfig - networkInfo clusterd.NetworkInfo - monEndpoints string - nodeName string - pvcBacked bool -} - -func init() { - Cmd.AddCommand(cleanUpCmd, - operatorCmd, - agentCmd, - admissionCmd, - osdCmd, - mgrCmd, - configCmd) -} - -func createContext() *clusterd.Context { - context := rook.NewContext() - context.ConfigDir = cfg.dataDir - context.ConfigFileOverride = cfg.cephConfigOverride - context.NetworkInfo = cfg.NetworkInfo() - return context -} - -func addCephFlags(command *cobra.Command) { - command.Flags().StringVar(&cfg.networkInfo.PublicAddr, "public-ip", "", "public IP address for this machine") - command.Flags().StringVar(&cfg.networkInfo.ClusterAddr, "private-ip", "", "private IP address for this machine") - command.Flags().StringVar(&clusterInfo.FSID, "fsid", "", "the cluster uuid") - command.Flags().StringVar(&clusterInfo.MonitorSecret, "mon-secret", "", "the cephx keyring for monitors") - command.Flags().StringVar(&clusterInfo.CephCred.Username, "ceph-username", "", "ceph username") - command.Flags().StringVar(&clusterInfo.CephCred.Secret, "ceph-secret", "", "secret for the ceph user (random if not specified)") - command.Flags().StringVar(&cfg.monEndpoints, "mon-endpoints", "", "ceph mon endpoints") - command.Flags().StringVar(&cfg.dataDir, "config-dir", "/var/lib/rook", "directory for storing configuration") - command.Flags().StringVar(&cfg.cephConfigOverride, "ceph-config-override", "", "optional path to a ceph config file that will be appended to the config files that rook generates") - - clusterInfo.Namespace = os.Getenv(k8sutil.PodNamespaceEnvVar) - - // deprecated ipv4 format address - // TODO: remove these legacy flags in the future - command.Flags().StringVar(&cfg.networkInfo.PublicAddrIPv4, "public-ipv4", "", "public IPv4 address for this machine") - command.Flags().StringVar(&cfg.networkInfo.ClusterAddrIPv4, "private-ipv4", "", "private IPv4 address for this machine") -} - -func (c *config) NetworkInfo() clusterd.NetworkInfo { - return c.networkInfo.Simplify() -} diff --git a/cmd/rook/ceph/cleanup.go b/cmd/rook/ceph/cleanup.go deleted file mode 100644 index fe16107cd..000000000 --- a/cmd/rook/ceph/cleanup.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
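The removed Ceph commands all share the same wiring: define cobra flags, then let flags.SetFlagsFromEnv fill unset flags from ROOK_-prefixed environment variables before the command runs. The sketch below is a rough standalone approximation of that pattern under the env-var naming convention shown; setFlagsFromEnv here is a stand-in, not the helper from pkg/util/flags.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

// setFlagsFromEnv approximates the project's flags.SetFlagsFromEnv helper:
// each flag is populated from an environment variable named
// PREFIX_FLAG_NAME, with dashes replaced by underscores
// (e.g. --mon-endpoints <- ROOK_MON_ENDPOINTS).
func setFlagsFromEnv(fs *pflag.FlagSet, prefix string) {
	fs.VisitAll(func(f *pflag.Flag) {
		env := prefix + "_" + strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_"))
		if v, ok := os.LookupEnv(env); ok {
			_ = fs.Set(f.Name, v) // command-line parsing runs later and still overrides this
		}
	})
}

func main() {
	var monEndpoints, dataDir string
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("mon-endpoints:", monEndpoints, "config-dir:", dataDir)
		},
	}
	cmd.Flags().StringVar(&monEndpoints, "mon-endpoints", "", "ceph mon endpoints")
	cmd.Flags().StringVar(&dataDir, "config-dir", "/var/lib/rook", "directory for storing configuration")

	// Same order as the deleted commands: bind env vars in init, then execute.
	setFlagsFromEnv(cmd.Flags(), "ROOK")
	_ = cmd.Execute()
}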
-*/ - -package ceph - -import ( - "os" - - "github.com/rook/rook/cmd/rook/rook" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - cleanup "github.com/rook/rook/pkg/daemon/ceph/cleanup" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var ( - dataDirHostPath string - namespaceDir string - monSecret string - clusterFSID string - sanitizeMethod string - sanitizeDataSource string - sanitizeIteration int32 -) - -var cleanUpCmd = &cobra.Command{ - Use: "clean", - Short: "Starts the cleanup process on the disks after ceph cluster is deleted", -} - -func init() { - cleanUpCmd.Flags().StringVar(&dataDirHostPath, "data-dir-host-path", "", "dataDirHostPath on the node") - cleanUpCmd.Flags().StringVar(&namespaceDir, "namespace-dir", "", "dataDirHostPath on the node") - cleanUpCmd.Flags().StringVar(&monSecret, "mon-secret", "", "monitor secret from the keyring") - cleanUpCmd.Flags().StringVar(&clusterFSID, "cluster-fsid", "", "ceph cluster fsid") - cleanUpCmd.Flags().StringVar(&sanitizeMethod, "sanitize-method", string(cephv1.SanitizeMethodQuick), "sanitize method to use (metadata or data)") - cleanUpCmd.Flags().StringVar(&sanitizeDataSource, "sanitize-data-source", string(cephv1.SanitizeDataSourceZero), "data source to sanitize the disk (zero or random)") - cleanUpCmd.Flags().Int32Var(&sanitizeIteration, "sanitize-iteration", 1, "overwrite N times the disk") - flags.SetFlagsFromEnv(cleanUpCmd.Flags(), rook.RookEnvVarPrefix) - cleanUpCmd.RunE = startCleanUp -} - -func startCleanUp(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(cleanUpCmd.Flags()) - - logger.Info("starting cluster clean up") - // Delete dataDirHostPath - if dataDirHostPath != "" { - // Remove both dataDirHostPath and monitor store - cleanup.StartHostPathCleanup(namespaceDir, dataDirHostPath, monSecret) - } - - namespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - clusterInfo := client.AdminClusterInfo(namespace) - clusterInfo.FSID = clusterFSID - - // Build Sanitizer - s := cleanup.NewDiskSanitizer(createContext(), - clusterInfo, - &cephv1.SanitizeDisksSpec{ - Method: cephv1.SanitizeMethodProperty(sanitizeMethod), - DataSource: cephv1.SanitizeDataSourceProperty(sanitizeDataSource), - Iteration: sanitizeIteration, - }, - ) - - // Start OSD wipe process - s.StartSanitizeDisks() - - return nil -} diff --git a/cmd/rook/ceph/config.go b/cmd/rook/ceph/config.go deleted file mode 100644 index ca4c9f8f8..000000000 --- a/cmd/rook/ceph/config.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ceph - -import ( - "io/ioutil" - "os" - - "github.com/pkg/errors" - "github.com/rook/rook/cmd/rook/rook" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/util" - "github.com/spf13/cobra" -) - -var configCmd = &cobra.Command{ - Use: "config-init", - Short: "Generates basic Ceph config", - Long: `Generate the most basic Ceph config for connecting non-Ceph daemons to a Ceph -cluster (e.g., nfs-ganesha). Effectively what this means is that it generates -'/etc/ceph/ceph.conf' with 'mon_host' populated and a keyring path (given via -commandline flag) associated with the user given via commandline flag. -'mon_host' is determined by the 'ROOK_CEPH_MON_HOST' env var present in other -Ceph daemon pods, and the keyring is expected to be mounted into the container -with a Kubernetes pod volume+mount.`, -} - -var ( - keyring string - username string -) - -func init() { - configCmd.Flags().StringVar(&keyring, "keyring", "", "path to the keyring file") - if err := configCmd.MarkFlagRequired("keyring"); err != nil { - panic(err) - } - - configCmd.Flags().StringVar(&username, "username", "", "the daemon username") - if err := configCmd.MarkFlagRequired("username"); err != nil { - panic(err) - } - - configCmd.RunE = initConfig -} - -func initConfig(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - - rook.LogStartupInfo(configCmd.Flags()) - - if keyring == "" { - rook.TerminateFatal(errors.New("keyring is empty string")) - } - if username == "" { - rook.TerminateFatal(errors.New("username is empty string")) - } - - monHost := os.Getenv("ROOK_CEPH_MON_HOST") - if monHost == "" { - rook.TerminateFatal(errors.New("ROOK_CEPH_MON_HOST is not set or is empty string")) - } - - cfg := ` -[global] -mon_host = ` + monHost + ` - -[` + username + `] -keyring = ` + keyring + ` -` - - var fileMode os.FileMode = 0444 // read-only - err := ioutil.WriteFile(cephclient.DefaultConfigFilePath(), []byte(cfg), fileMode) - if err != nil { - rook.TerminateFatal(errors.Wrapf(err, "failed to write config file")) - } - - util.WriteFileToLog(logger, cephclient.DefaultConfigFilePath()) - - return nil -} diff --git a/cmd/rook/ceph/mgr.go b/cmd/rook/ceph/mgr.go deleted file mode 100644 index 9ab42fed6..000000000 --- a/cmd/rook/ceph/mgr.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
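The deleted config-init command boils down to writing a two-section ceph.conf with mon_host taken from ROOK_CEPH_MON_HOST and a keyring path under the given user. Below is a minimal sketch of the same output rendered with text/template; the output path, the example username, and the writeMinimalCephConf helper are assumptions for the sketch, and the real command wrote to Ceph's default config path.

package main

import (
	"bytes"
	"fmt"
	"os"
	"text/template"
)

// Minimal ceph.conf as produced by the deleted config-init command: just
// enough for a non-Ceph daemon to reach the monitors with a named keyring.
const cephConfTmpl = `[global]
mon_host = {{ .MonHost }}

[{{ .Username }}]
keyring = {{ .Keyring }}
`

type cephConf struct {
	MonHost  string
	Username string
	Keyring  string
}

// writeMinimalCephConf renders the config and writes it read-only (0444),
// matching the file mode used by the deleted initConfig handler.
func writeMinimalCephConf(path string, c cephConf) error {
	tmpl := template.Must(template.New("ceph.conf").Parse(cephConfTmpl))
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, c); err != nil {
		return err
	}
	return os.WriteFile(path, buf.Bytes(), 0o444)
}

func main() {
	conf := cephConf{
		MonHost:  os.Getenv("ROOK_CEPH_MON_HOST"),    // same env var the command required
		Username: "client.nfs-ganesha",               // example daemon user, illustrative only
		Keyring:  "/etc/ceph/keyring-store/keyring",  // example mounted keyring path
	}
	if conf.MonHost == "" {
		fmt.Fprintln(os.Stderr, "ROOK_CEPH_MON_HOST is not set")
		os.Exit(1)
	}
	if err := writeMinimalCephConf("./ceph.conf", conf); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}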
-*/ - -package ceph - -import ( - "time" - - "github.com/rook/rook/cmd/rook/rook" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var mgrCmd = &cobra.Command{ - Use: "mgr", -} -var mgrSidecarCmd = &cobra.Command{ - Use: "watch-active", -} -var ( - updateMgrServicesInterval string - daemonName string - clusterSpec cephv1.ClusterSpec - rawCephVersion string -) - -func init() { - addCephFlags(mgrCmd) - - // add the subcommands to the parent mgr command - mgrCmd.AddCommand(mgrSidecarCmd) - - mgrSidecarCmd.Flags().BoolVar(&clusterSpec.Dashboard.Enabled, "dashboard-enabled", false, "whether the dashboard is enabled") - mgrSidecarCmd.Flags().BoolVar(&clusterSpec.Monitoring.Enabled, "monitoring-enabled", false, "whether the monitoring is enabled") - mgrSidecarCmd.Flags().StringVar(&updateMgrServicesInterval, "update-interval", "", "the interval at which to update the mgr services") - mgrSidecarCmd.Flags().StringVar(&ownerRefID, "cluster-id", "", "the UID of the cluster CR that owns this cluster") - mgrSidecarCmd.Flags().StringVar(&clusterName, "cluster-name", "", "the name of the cluster CR that owns this cluster") - mgrSidecarCmd.Flags().StringVar(&daemonName, "daemon-name", "", "the name of the local mgr daemon") - mgrSidecarCmd.Flags().StringVar(&rawCephVersion, "ceph-version", "", "the version of ceph") - - flags.SetFlagsFromEnv(mgrCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetFlagsFromEnv(mgrSidecarCmd.Flags(), rook.RookEnvVarPrefix) - mgrSidecarCmd.RunE = runMgrSidecar -} - -// Start the mgr daemon sidecar -func runMgrSidecar(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - - context := createContext() - clusterInfo.Monitors = mon.ParseMonEndpoints(cfg.monEndpoints) - rook.LogStartupInfo(mgrSidecarCmd.Flags()) - - ownerRef := opcontroller.ClusterOwnerRef(clusterName, ownerRefID) - clusterInfo.OwnerInfo = k8sutil.NewOwnerInfoWithOwnerRef(&ownerRef, clusterInfo.Namespace) - - if err := client.WriteCephConfig(context, &clusterInfo); err != nil { - rook.TerminateFatal(err) - } - - interval, err := time.ParseDuration(updateMgrServicesInterval) - if err != nil { - rook.TerminateFatal(err) - } - - version, err := cephver.ExtractCephVersion(rawCephVersion) - if err != nil { - rook.TerminateFatal(err) - } - clusterInfo.CephVersion = *version - - m := mgr.New(context, &clusterInfo, clusterSpec, "") - for { - err := m.ReconcileActiveMgrServices(daemonName) - if err != nil { - logger.Errorf("failed to reconcile services. %v", err) - } else { - logger.Infof("successfully reconciled services. checking again in %ds", (int)(interval.Seconds())) - } - time.Sleep(interval) - } -} diff --git a/cmd/rook/ceph/operator.go b/cmd/rook/ceph/operator.go deleted file mode 100644 index 41765b1b3..000000000 --- a/cmd/rook/ceph/operator.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
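The deleted mgr watch-active sidecar is essentially an interval loop: parse --update-interval with time.ParseDuration, call ReconcileActiveMgrServices, log failures, and sleep. A stripped-down sketch of that loop shape follows; runForever and the stub reconciler are hypothetical names used only to keep the example self-contained.

package main

import (
	"fmt"
	"log"
	"time"
)

// reconcileFunc stands in for mgr.ReconcileActiveMgrServices in the deleted
// sidecar: it is retried forever on a fixed interval, and failures are only
// logged so the sidecar keeps converging on the active mgr.
type reconcileFunc func() error

// runForever mirrors the shape of the deleted watch-active loop: parse the
// interval once, then reconcile, log the outcome, and sleep.
func runForever(interval string, reconcile reconcileFunc) error {
	d, err := time.ParseDuration(interval) // e.g. "15s", as passed via --update-interval
	if err != nil {
		return fmt.Errorf("invalid update interval %q: %w", interval, err)
	}
	for {
		if err := reconcile(); err != nil {
			log.Printf("failed to reconcile services. %v", err)
		} else {
			log.Printf("successfully reconciled services. checking again in %ds", int(d.Seconds()))
		}
		time.Sleep(d)
	}
}

func main() {
	// Hypothetical reconciler used only to make the sketch runnable.
	_ = runForever("15s", func() error {
		log.Println("checking which mgr daemon is active...")
		return nil
	})
}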
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ceph - -import ( - "github.com/pkg/errors" - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - operator "github.com/rook/rook/pkg/operator/ceph" - cluster "github.com/rook/rook/pkg/operator/ceph/cluster" - "github.com/rook/rook/pkg/operator/ceph/csi" - - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -const ( - containerName = "rook-ceph-operator" -) - -var operatorCmd = &cobra.Command{ - Use: "operator", - Short: "Runs the Ceph operator for orchestrating and managing Ceph storage in a Kubernetes cluster", - Long: `Runs the Ceph operator for orchestrating and managing Ceph storage in a Kubernetes cluster -https://github.com/rook/rook`, -} - -func init() { - // csi deployment templates - operatorCmd.Flags().StringVar(&csi.RBDPluginTemplatePath, "csi-rbd-plugin-template-path", csi.DefaultRBDPluginTemplatePath, "path to ceph-csi rbd plugin template") - - operatorCmd.Flags().StringVar(&csi.RBDProvisionerDepTemplatePath, "csi-rbd-provisioner-dep-template-path", csi.DefaultRBDProvisionerDepTemplatePath, "path to ceph-csi rbd provisioner deployment template") - - operatorCmd.Flags().StringVar(&csi.CephFSPluginTemplatePath, "csi-cephfs-plugin-template-path", csi.DefaultCephFSPluginTemplatePath, "path to ceph-csi cephfs plugin template") - operatorCmd.Flags().StringVar(&csi.CephFSProvisionerDepTemplatePath, "csi-cephfs-provisioner-dep-template-path", csi.DefaultCephFSProvisionerDepTemplatePath, "path to ceph-csi cephfs provisioner deployment template") - - operatorCmd.Flags().BoolVar(&cluster.EnableMachineDisruptionBudget, "enable-machine-disruption-budget", false, "enable fencing controllers") - - flags.SetFlagsFromEnv(operatorCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetLoggingFlags(operatorCmd.Flags()) - operatorCmd.RunE = startOperator -} - -func startOperator(cmd *cobra.Command, args []string) error { - - rook.SetLogLevel() - - rook.LogStartupInfo(operatorCmd.Flags()) - - logger.Info("starting Rook-Ceph operator") - context := createContext() - context.NetworkInfo = clusterd.NetworkInfo{} - context.ConfigDir = k8sutil.DataDir - volumeAttachment, err := attachment.New(context) - if err != nil { - rook.TerminateFatal(err) - } - - rook.CheckOperatorResources(context.Clientset) - rookImage := rook.GetOperatorImage(context.Clientset, containerName) - rookBaseImageCephVersion, err := rook.GetOperatorBaseImageCephVersion(context) - if err != nil { - logger.Errorf("failed to get operator base image ceph version. 
%v", err) - } - opcontroller.OperatorCephBaseImageVersion = rookBaseImageCephVersion - logger.Infof("base ceph version inside the rook operator image is %q", opcontroller.OperatorCephBaseImageVersion) - - serviceAccountName := rook.GetOperatorServiceAccount(context.Clientset) - op := operator.New(context, volumeAttachment, rookImage, serviceAccountName) - err = op.Run() - if err != nil { - rook.TerminateFatal(errors.Wrap(err, "failed to run operator\n")) - } - - return nil -} diff --git a/cmd/rook/ceph/osd.go b/cmd/rook/ceph/osd.go deleted file mode 100644 index 436b4b4e7..000000000 --- a/cmd/rook/ceph/osd.go +++ /dev/null @@ -1,325 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ceph - -import ( - "encoding/json" - "os" - "strings" - - "k8s.io/client-go/kubernetes" - - "github.com/pkg/errors" - "github.com/rook/rook/cmd/rook/rook" - osddaemon "github.com/rook/rook/pkg/daemon/ceph/osd" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - osdcfg "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var osdCmd = &cobra.Command{ - Use: "osd", - Short: "Provisions and runs the osd daemon", -} -var osdConfigCmd = &cobra.Command{ - Use: "init", - Short: "Updates ceph.conf for the osd", -} -var provisionCmd = &cobra.Command{ - Use: "provision", - Short: "Generates osd config and prepares an osd for runtime", -} -var osdStartCmd = &cobra.Command{ - Use: "start", - Short: "Starts the osd daemon", // OSDs that were provisioned by ceph-volume -} -var osdRemoveCmd = &cobra.Command{ - Use: "remove", - Short: "Removes a set of OSDs from the cluster", -} - -var ( - osdDataDeviceFilter string - osdDataDevicePathFilter string - ownerRefID string - clusterName string - osdID int - osdStoreType string - osdStringID string - osdUUID string - osdIsDevice bool - pvcBackedOSD bool - blockPath string - lvBackedPV bool - osdIDsToRemove string - preservePVC bool -) - -func addOSDFlags(command *cobra.Command) { - addOSDConfigFlags(osdConfigCmd) - addOSDConfigFlags(provisionCmd) - - // flags specific to provisioning - provisionCmd.Flags().StringVar(&cfg.devices, "data-devices", "", "comma separated list of devices to use for storage") - provisionCmd.Flags().StringVar(&osdDataDeviceFilter, "data-device-filter", "", "a regex filter for the device names to use, or \"all\"") - provisionCmd.Flags().StringVar(&osdDataDevicePathFilter, "data-device-path-filter", "", "a regex filter for the device path names to use") - provisionCmd.Flags().StringVar(&cfg.metadataDevice, "metadata-device", "", "device to use for metadata (e.g. 
a high performance SSD/NVMe device)") - provisionCmd.Flags().BoolVar(&cfg.forceFormat, "force-format", false, - "true to force the format of any specified devices, even if they already have a filesystem. BE CAREFUL!") - provisionCmd.Flags().BoolVar(&cfg.pvcBacked, "pvc-backed-osd", false, "true to specify a block mode pvc is backing the OSD") - // flags for generating the osd config - osdConfigCmd.Flags().IntVar(&osdID, "osd-id", -1, "osd id for which to generate config") - osdConfigCmd.Flags().BoolVar(&osdIsDevice, "is-device", false, "whether the osd is a device") - - // flags for running osds that were provisioned by ceph-volume - osdStartCmd.Flags().StringVar(&osdStringID, "osd-id", "", "the osd ID") - osdStartCmd.Flags().StringVar(&osdUUID, "osd-uuid", "", "the osd UUID") - osdStartCmd.Flags().StringVar(&osdStoreType, "osd-store-type", "", "the osd store type such as bluestore") - osdStartCmd.Flags().BoolVar(&pvcBackedOSD, "pvc-backed-osd", false, "Whether the OSD backing store in PVC or not") - osdStartCmd.Flags().StringVar(&blockPath, "block-path", "", "Block path for the OSD created by ceph-volume") - osdStartCmd.Flags().BoolVar(&lvBackedPV, "lv-backed-pv", false, "Whether the PV located on LV") - - // flags for removing OSDs that are unhealthy or otherwise should be purged from the cluster - osdRemoveCmd.Flags().StringVar(&osdIDsToRemove, "osd-ids", "", "OSD IDs to remove from the cluster") - osdRemoveCmd.Flags().BoolVar(&preservePVC, "preserve-pvc", false, "Whether PVCs for OSDs will be deleted") - - // add the subcommands to the parent osd command - osdCmd.AddCommand(osdConfigCmd, - provisionCmd, - osdStartCmd, - osdRemoveCmd) -} - -func addOSDConfigFlags(command *cobra.Command) { - command.Flags().StringVar(&ownerRefID, "cluster-id", "", "the UID of the cluster CR that owns this cluster") - command.Flags().StringVar(&clusterName, "cluster-name", "", "the name of the cluster CR that owns this cluster") - command.Flags().StringVar(&cfg.location, "location", "", "location of this node for CRUSH placement") - command.Flags().StringVar(&cfg.nodeName, "node-name", os.Getenv("HOSTNAME"), "the host name of the node") - - // OSD store config flags - command.Flags().IntVar(&cfg.storeConfig.WalSizeMB, "osd-wal-size", osdcfg.WalDefaultSizeMB, "default size (MB) for OSD write ahead log (WAL) (bluestore)") - command.Flags().IntVar(&cfg.storeConfig.DatabaseSizeMB, "osd-database-size", 0, "default size (MB) for OSD database (bluestore)") - command.Flags().IntVar(&cfg.storeConfig.OSDsPerDevice, "osds-per-device", 1, "the number of OSDs per device") - command.Flags().BoolVar(&cfg.storeConfig.EncryptedDevice, "encrypted-device", false, "whether to encrypt the OSD with dmcrypt") - command.Flags().StringVar(&cfg.storeConfig.DeviceClass, "osd-crush-device-class", "", "The device class for all OSDs configured on this node") - command.Flags().StringVar(&cfg.storeConfig.InitialWeight, "osd-crush-initial-weight", "", "The initial weight of OSD in TiB units") -} - -func init() { - addOSDFlags(osdCmd) - addCephFlags(osdCmd) - flags.SetFlagsFromEnv(osdCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetFlagsFromEnv(osdConfigCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetFlagsFromEnv(provisionCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetFlagsFromEnv(osdStartCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetFlagsFromEnv(osdRemoveCmd.Flags(), rook.RookEnvVarPrefix) - - osdConfigCmd.RunE = writeOSDConfig - provisionCmd.RunE = prepareOSD - osdStartCmd.RunE = startOSD - osdRemoveCmd.RunE = removeOSDs -} - -// Start 
the osd daemon if provisioned by ceph-volume -func startOSD(cmd *cobra.Command, args []string) error { - required := []string{"osd-id", "osd-uuid"} - if err := flags.VerifyRequiredFlags(osdStartCmd, required); err != nil { - return err - } - - commonOSDInit(osdStartCmd) - - context := createContext() - - // Run OSD start sequence - err := osddaemon.StartOSD(context, osdStoreType, osdStringID, osdUUID, blockPath, pvcBackedOSD, lvBackedPV, args) - if err != nil { - rook.TerminateFatal(err) - } - return nil -} - -func verifyConfigFlags(configCmd *cobra.Command) error { - required := []string{"cluster-id", "node-name"} - if err := flags.VerifyRequiredFlags(configCmd, required); err != nil { - return err - } - required = []string{"mon-endpoints", "mon-secret", "ceph-username", "ceph-secret"} - if err := flags.VerifyRequiredFlags(osdCmd, required); err != nil { - return err - } - return nil -} - -func writeOSDConfig(cmd *cobra.Command, args []string) error { - if err := verifyConfigFlags(osdConfigCmd); err != nil { - return err - } - if osdID == -1 { - return errors.New("osd id not specified") - } - - commonOSDInit(osdConfigCmd) - - return nil -} - -// Provision a device or directory for an OSD -func prepareOSD(cmd *cobra.Command, args []string) error { - if err := verifyConfigFlags(provisionCmd); err != nil { - return err - } - - var dataDevices []osddaemon.DesiredDevice - if osdDataDeviceFilter != "" { - if cfg.devices != "" || osdDataDevicePathFilter != "" { - return errors.New("only one of --data-devices, --data-device-filter and --data-device-path-filter can be specified") - } - - dataDevices = []osddaemon.DesiredDevice{ - {Name: osdDataDeviceFilter, IsFilter: true, OSDsPerDevice: cfg.storeConfig.OSDsPerDevice}, - } - } else if osdDataDevicePathFilter != "" { - if cfg.devices != "" { - return errors.New("only one of --data-devices, --data-device-filter and --data-device-path-filter can be specified") - } - - dataDevices = []osddaemon.DesiredDevice{ - {Name: osdDataDevicePathFilter, IsDevicePathFilter: true, OSDsPerDevice: cfg.storeConfig.OSDsPerDevice}, - } - } else { - var err error - dataDevices, err = parseDevices(cfg.devices) - if err != nil { - rook.TerminateFatal(errors.Wrapf(err, "failed to parse device list (%q)", cfg.devices)) - } - } - - context := createContext() - commonOSDInit(provisionCmd) - crushLocation, topologyAffinity, err := getLocation(context.Clientset) - if err != nil { - rook.TerminateFatal(err) - } - logger.Infof("crush location of osd: %s", crushLocation) - - forceFormat := false - - ownerRef := opcontroller.ClusterOwnerRef(clusterName, ownerRefID) - ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(&ownerRef, clusterInfo.Namespace) - clusterInfo.OwnerInfo = ownerInfo - kv := k8sutil.NewConfigMapKVStore(clusterInfo.Namespace, context.Clientset, ownerInfo) - agent := osddaemon.NewAgent(context, dataDevices, cfg.metadataDevice, forceFormat, - cfg.storeConfig, &clusterInfo, cfg.nodeName, kv, cfg.pvcBacked) - - err = osddaemon.Provision(context, agent, crushLocation, topologyAffinity) - if err != nil { - // something failed in the OSD orchestration, update the status map with failure details - status := oposd.OrchestrationStatus{ - Status: oposd.OrchestrationStatusFailed, - Message: err.Error(), - PvcBackedOSD: cfg.pvcBacked, - } - oposd.UpdateNodeStatus(kv, cfg.nodeName, status) - - rook.TerminateFatal(err) - } - - return nil -} - -// Purge the desired OSDs from the cluster -func removeOSDs(cmd *cobra.Command, args []string) error { - required := []string{"osd-ids"} - if 
err := flags.VerifyRequiredFlags(osdRemoveCmd, required); err != nil { - return err - } - required = []string{"mon-endpoints", "ceph-username", "ceph-secret"} - if err := flags.VerifyRequiredFlags(osdCmd, required); err != nil { - return err - } - - commonOSDInit(osdRemoveCmd) - - context := createContext() - - // Run OSD remove sequence - err := osddaemon.RemoveOSDs(context, &clusterInfo, strings.Split(osdIDsToRemove, ","), preservePVC) - if err != nil { - rook.TerminateFatal(err) - } - return nil -} - -func commonOSDInit(cmd *cobra.Command) { - rook.SetLogLevel() - rook.LogStartupInfo(cmd.Flags()) - - clusterInfo.Monitors = mon.ParseMonEndpoints(cfg.monEndpoints) -} - -// use zone/region/hostname labels in the crushmap -func getLocation(clientset kubernetes.Interface) (string, string, error) { - // get the value the operator instructed to use as the host name in the CRUSH map - hostNameLabel := os.Getenv("ROOK_CRUSHMAP_HOSTNAME") - - rootLabel := os.Getenv(oposd.CrushRootVarName) - - loc, topologyAffinity, err := oposd.GetLocationWithNode(clientset, os.Getenv(k8sutil.NodeNameEnvVar), rootLabel, hostNameLabel) - if err != nil { - return "", "", err - } - return loc, topologyAffinity, nil -} - -// Parse the devices, which are sent as a JSON-marshalled list of device IDs with a StorageConfig spec -func parseDevices(devices string) ([]osddaemon.DesiredDevice, error) { - if devices == "" { - return []osddaemon.DesiredDevice{}, nil - } - - configuredDevices := []osdcfg.ConfiguredDevice{} - err := json.Unmarshal([]byte(devices), &configuredDevices) - if err != nil { - return nil, errors.Wrapf(err, "failed to JSON unmarshal configured devices (%q)", devices) - } - - var result []osddaemon.DesiredDevice - for _, cd := range configuredDevices { - d := osddaemon.DesiredDevice{ - Name: cd.ID, - } - d.OSDsPerDevice = cd.StoreConfig.OSDsPerDevice - d.DatabaseSizeMB = cd.StoreConfig.DatabaseSizeMB - d.DeviceClass = cd.StoreConfig.DeviceClass - d.InitialWeight = cd.StoreConfig.InitialWeight - d.MetadataDevice = cd.StoreConfig.MetadataDevice - - if d.OSDsPerDevice < 1 { - return nil, errors.Errorf("osds per device should be greater than 0 (%q)", d.OSDsPerDevice) - } - - result = append(result, d) - } - - logger.Infof("desired devices to configure osds: %+v", result) - return result, nil -} diff --git a/cmd/rook/ceph/osd_test.go b/cmd/rook/ceph/osd_test.go deleted file mode 100644 index 7560849df..000000000 --- a/cmd/rook/ceph/osd_test.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
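The removed provision command received its device list as a JSON-encoded list of configured devices and converted it into desired devices via parseDevices, rejecting any entry with fewer than one OSD per device (the osd_test.go hunk that follows exercises exactly this). The sketch below mirrors that conversion with local struct stand-ins; the JSON field names are assumptions and not necessarily the exact tags used by osdcfg.ConfiguredDevice.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// Local stand-ins for osdcfg.ConfiguredDevice and osddaemon.DesiredDevice,
// trimmed to the fields the deleted parseDevices helper copied across.
type storeConfig struct {
	OSDsPerDevice  int    `json:"osdsPerDevice,omitempty"`
	DatabaseSizeMB int    `json:"databaseSizeMB,omitempty"`
	DeviceClass    string `json:"deviceClass,omitempty"`
	InitialWeight  string `json:"initialWeight,omitempty"`
	MetadataDevice string `json:"metadataDevice,omitempty"`
}

type configuredDevice struct {
	ID          string      `json:"id"`
	StoreConfig storeConfig `json:"storeConfig"`
}

type desiredDevice struct {
	Name           string
	OSDsPerDevice  int
	DatabaseSizeMB int
	DeviceClass    string
	InitialWeight  string
	MetadataDevice string
}

// parseDevices mirrors the deleted helper: an empty string means no devices,
// anything else must be a JSON list of configured devices, and every entry
// must request at least one OSD per device.
func parseDevices(devices string) ([]desiredDevice, error) {
	if devices == "" {
		return []desiredDevice{}, nil
	}
	var configured []configuredDevice
	if err := json.Unmarshal([]byte(devices), &configured); err != nil {
		return nil, fmt.Errorf("failed to JSON unmarshal configured devices (%q): %w", devices, err)
	}
	var result []desiredDevice
	for _, cd := range configured {
		if cd.StoreConfig.OSDsPerDevice < 1 {
			return nil, errors.New("osds per device should be greater than 0")
		}
		result = append(result, desiredDevice{
			Name:           cd.ID,
			OSDsPerDevice:  cd.StoreConfig.OSDsPerDevice,
			DatabaseSizeMB: cd.StoreConfig.DatabaseSizeMB,
			DeviceClass:    cd.StoreConfig.DeviceClass,
			InitialWeight:  cd.StoreConfig.InitialWeight,
			MetadataDevice: cd.StoreConfig.MetadataDevice,
		})
	}
	return result, nil
}

func main() {
	// Same shape as the --data-devices value in the deleted osd provision command.
	in := `[{"id":"sda","storeConfig":{"osdsPerDevice":1}},{"id":"nvme01","storeConfig":{"osdsPerDevice":5,"deviceClass":"nvme"}}]`
	out, err := parseDevices(in)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}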
-*/ - -package ceph - -import ( - "encoding/json" - "testing" - - osddaemon "github.com/rook/rook/pkg/daemon/ceph/osd" - osdcfg "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - "github.com/stretchr/testify/assert" -) - -func TestParseDesiredDevices(t *testing.T) { - configuredDevices := []osdcfg.ConfiguredDevice{ - { - ID: "sda", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: 1, - }, - }, - { - ID: "sdb", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: 1, - }, - }, - { - ID: "nvme01", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: 5, - }, - }, - } - marshalledDevices, err := json.Marshal(configuredDevices) - assert.NoError(t, err) - devices := string(marshalledDevices) - - result, err := parseDevices(devices) - assert.NoError(t, err) - assert.Equal(t, 3, len(result)) - assert.Equal(t, "sda", result[0].Name) - assert.Equal(t, "sdb", result[1].Name) - assert.Equal(t, "nvme01", result[2].Name) - assert.Equal(t, 1, result[0].OSDsPerDevice) - assert.Equal(t, 1, result[1].OSDsPerDevice) - assert.Equal(t, 5, result[2].OSDsPerDevice) - assert.False(t, result[0].IsFilter) - assert.False(t, result[1].IsFilter) - assert.False(t, result[2].IsFilter) - assert.False(t, result[0].IsDevicePathFilter) - assert.False(t, result[1].IsDevicePathFilter) - assert.False(t, result[2].IsDevicePathFilter) - - // negative osd count is not allowed - configuredDevices = []osdcfg.ConfiguredDevice{ - { - ID: "nvme01", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: -5, - }, - }, - } - marshalledDevices, err = json.Marshal(configuredDevices) - assert.NoError(t, err) - devices = string(marshalledDevices) - - result, err = parseDevices(devices) - assert.Nil(t, result) - assert.Error(t, err) - - // 0 osd count is not allowed - configuredDevices = []osdcfg.ConfiguredDevice{ - { - ID: "nvme01", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: 0, - }, - }, - } - marshalledDevices, err = json.Marshal(configuredDevices) - assert.NoError(t, err) - devices = string(marshalledDevices) - - result, err = parseDevices(devices) - assert.Nil(t, result) - assert.Error(t, err) - - // OSDsPerDevice, metadataDevice, databaseSizeMB and deviceClass - configuredDevices = []osdcfg.ConfiguredDevice{ - { - ID: "sdd", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: 1, - DatabaseSizeMB: 2048, - MetadataDevice: "sdb", - }, - }, - { - ID: "sde", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: 1, - MetadataDevice: "sdb", - }, - }, - { - ID: "sdf", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: 1, - MetadataDevice: "sdc", - }, - }, - { - ID: "sdg", - StoreConfig: osdcfg.StoreConfig{ - OSDsPerDevice: 1, - DeviceClass: "tst", - MetadataDevice: "sdc", - }, - }, - } - marshalledDevices, err = json.Marshal(configuredDevices) - assert.NoError(t, err) - devices = string(marshalledDevices) - - result, err = parseDevices(devices) - assert.NoError(t, err) - assert.Equal(t, "sdd", result[0].Name) - assert.Equal(t, "sde", result[1].Name) - assert.Equal(t, "sdf", result[2].Name) - assert.Equal(t, "sdg", result[3].Name) - assert.Equal(t, 1, result[0].OSDsPerDevice) - assert.Equal(t, 1, result[1].OSDsPerDevice) - assert.Equal(t, 1, result[2].OSDsPerDevice) - assert.Equal(t, 1, result[3].OSDsPerDevice) - assert.Equal(t, 2048, result[0].DatabaseSizeMB) - assert.Equal(t, 0, result[1].DatabaseSizeMB) - assert.Equal(t, 0, result[2].DatabaseSizeMB) - assert.Equal(t, 0, result[3].DatabaseSizeMB) - assert.Equal(t, "", result[0].DeviceClass) - assert.Equal(t, "", result[1].DeviceClass) - assert.Equal(t, "", result[2].DeviceClass) 
- assert.Equal(t, "tst", result[3].DeviceClass) - assert.Equal(t, "sdb", result[0].MetadataDevice) - assert.Equal(t, "sdb", result[1].MetadataDevice) - assert.Equal(t, "sdc", result[2].MetadataDevice) - assert.Equal(t, "sdc", result[3].MetadataDevice) - assert.False(t, result[0].IsFilter) - assert.False(t, result[1].IsFilter) - assert.False(t, result[2].IsFilter) - assert.False(t, result[3].IsFilter) - assert.False(t, result[0].IsDevicePathFilter) - assert.False(t, result[1].IsDevicePathFilter) - assert.False(t, result[2].IsDevicePathFilter) - assert.False(t, result[3].IsDevicePathFilter) - - // check empty devices list - result, err = parseDevices("") - assert.NoError(t, err) - assert.Equal(t, []osddaemon.DesiredDevice{}, result) -} diff --git a/cmd/rook/discover.go b/cmd/rook/discover.go deleted file mode 100644 index 468515a46..000000000 --- a/cmd/rook/discover.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package main - -import ( - "time" - - rook "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/daemon/discover" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var ( - discoverCmd = &cobra.Command{ - Use: "discover", - Short: "Discover devices", - } - - // interval between discovering devices - discoverDevicesInterval time.Duration - - // Uses ceph-volume inventory to extend devices information - usesCVInventory bool -) - -func init() { - discoverCmd.Flags().DurationVar(&discoverDevicesInterval, "discover-interval", 60*time.Minute, "interval between discovering devices (default 60m)") - discoverCmd.Flags().BoolVar(&usesCVInventory, "use-ceph-volume", false, "Use ceph-volume inventory to extend storage devices information (default false)") - - flags.SetFlagsFromEnv(discoverCmd.Flags(), rook.RookEnvVarPrefix) - discoverCmd.RunE = startDiscover -} - -func startDiscover(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - - rook.LogStartupInfo(discoverCmd.Flags()) - - context := rook.NewContext() - - err := discover.Run(context, discoverDevicesInterval, usesCVInventory) - if err != nil { - rook.TerminateFatal(err) - } - - return nil -} diff --git a/cmd/rook/main.go b/cmd/rook/main.go index 2d7699d4b..e269c595a 100644 --- a/cmd/rook/main.go +++ b/cmd/rook/main.go @@ -18,12 +18,10 @@ package main import ( "fmt" - "github.com/rook/rook/cmd/rook/cassandra" - "github.com/rook/rook/cmd/rook/ceph" - "github.com/rook/rook/cmd/rook/nfs" - rook "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/cmd/rook/util" - "github.com/rook/rook/cmd/rook/version" + "github.com/rook/cassandra/cmd/rook/cassandra" + rook "github.com/rook/cassandra/cmd/rook/rook" + "github.com/rook/cassandra/cmd/rook/util" + "github.com/rook/cassandra/cmd/rook/version" ) func main() { @@ -36,10 +34,7 @@ func main() { func addCommands() { rook.RootCmd.AddCommand( version.VersionCmd, - discoverCmd, // backend commands - ceph.Cmd, - nfs.Cmd, cassandra.Cmd, // util commands diff --git 
a/cmd/rook/nfs/nfs.go b/cmd/rook/nfs/nfs.go deleted file mode 100644 index 8c5057757..000000000 --- a/cmd/rook/nfs/nfs.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "github.com/coreos/pkg/capnslog" - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "nfs", - Short: "Main command for NFS operator and daemons.", -} - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "nfscmd") -) - -func init() { - Cmd.AddCommand(operatorCmd) - Cmd.AddCommand(webhookCmd) - Cmd.AddCommand(provisonerCmd) - Cmd.AddCommand(serverCmd) -} diff --git a/cmd/rook/nfs/operator.go b/cmd/rook/nfs/operator.go deleted file mode 100644 index 2d6e8d0a3..000000000 --- a/cmd/rook/nfs/operator.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "github.com/rook/rook/cmd/rook/rook" - operator "github.com/rook/rook/pkg/operator/nfs" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var operatorCmd = &cobra.Command{ - Use: "operator", - Short: "Runs the NFS operator to deploy and manage NFS server in kubernetes clusters", - Long: `Runs the NFS operator to deploy and manage NFS server in kubernetes clusters. -https://github.com/rook/rook`, -} - -func init() { - flags.SetFlagsFromEnv(operatorCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetLoggingFlags(operatorCmd.Flags()) - - operatorCmd.RunE = startOperator -} - -func startOperator(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(operatorCmd.Flags()) - - logger.Infof("starting NFS operator") - context := rook.NewContext() - op := operator.New(context) - err := op.Run() - rook.TerminateOnError(err, "failed to run operator") - - return nil -} diff --git a/cmd/rook/nfs/provisioner.go b/cmd/rook/nfs/provisioner.go deleted file mode 100644 index 49e41e3a8..000000000 --- a/cmd/rook/nfs/provisioner.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "context" - "errors" - - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/operator/nfs" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" -) - -var provisonerCmd = &cobra.Command{ - Use: "provisioner", - Short: "Runs the NFS provisioner for provisioning volumes", - Long: "Runs the NFS provisioner for provisioning volumes from the rook provisioned nfs servers", -} - -var ( - provisioner *string -) - -func init() { - flags.SetFlagsFromEnv(provisonerCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetLoggingFlags(provisonerCmd.Flags()) - - provisioner = provisonerCmd.Flags().String("provisioner", "", "Name of the provisioner. The provisioner will only provision volumes for claims that request a StorageClass with a provisioner field set equal to this name.") - provisonerCmd.RunE = startProvisioner -} - -func startProvisioner(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(serverCmd.Flags()) - if len(*provisioner) == 0 { - return errors.New("--provisioner is a required parameter") - } - - rookContext := rook.NewContext() - clientset := rookContext.Clientset - rookClientset := rookContext.RookClientset - - serverVersion, err := clientset.Discovery().ServerVersion() - if err != nil { - logger.Fatalf("Error getting server version: %v", err) - } - - clientNFSProvisioner, err := nfs.NewNFSProvisioner(clientset, rookClientset) - if err != nil { - return err - } - - pc := controller.NewProvisionController(clientset, *provisioner, clientNFSProvisioner, serverVersion.GitVersion) - neverStopCtx := context.Background() - pc.Run(neverStopCtx) - return nil -} diff --git a/cmd/rook/nfs/server.go b/cmd/rook/nfs/server.go deleted file mode 100644 index 2e39e4c1a..000000000 --- a/cmd/rook/nfs/server.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "errors" - - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/operator/nfs" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var serverCmd = &cobra.Command{ - Use: "server", - Short: "Runs the NFS server to deploy and manage NFS server in kubernetes clusters", - Long: `Runs the NFS operator to deploy and manage NFS server in kubernetes clusters. 
-https://github.com/rook/rook`, -} - -var ( - ganeshaConfigPath *string -) - -func init() { - flags.SetFlagsFromEnv(serverCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetLoggingFlags(serverCmd.Flags()) - - ganeshaConfigPath = serverCmd.Flags().String("ganeshaConfigPath", "", "ConfigPath of nfs ganesha") - - serverCmd.RunE = startServer -} - -func startServer(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(serverCmd.Flags()) - if len(*ganeshaConfigPath) == 0 { - return errors.New("--ganeshaConfigPath is a required parameter") - } - - logger.Infof("Setting up NFS server!") - - err := nfs.Setup(*ganeshaConfigPath) - if err != nil { - logger.Fatalf("Error setting up NFS server: %v", err) - } - - logger.Infof("starting NFS server") - // This blocks until server exits (presumably due to an error) - err = nfs.Run(*ganeshaConfigPath) - if err != nil { - logger.Errorf("NFS server Exited Unexpectedly with err: %v", err) - } - - return nil -} diff --git a/cmd/rook/nfs/webhook.go b/cmd/rook/nfs/webhook.go deleted file mode 100644 index f559ac74c..000000000 --- a/cmd/rook/nfs/webhook.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "github.com/rook/rook/cmd/rook/rook" - operator "github.com/rook/rook/pkg/operator/nfs" - "github.com/spf13/cobra" -) - -var ( - port int - certDir string -) - -var webhookCmd = &cobra.Command{ - Use: "webhook", - Short: "Runs the NFS webhook admission", -} - -func init() { - webhookCmd.Flags().IntVar(&port, "port", 9443, "port that the webhook server serves at") - webhookCmd.Flags().StringVar(&certDir, "cert-dir", "", "directory that contains the server key and certificate. 
if not set will use default controller-runtime wwebhook directory") - webhookCmd.RunE = startWebhook -} - -func startWebhook(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(webhookCmd.Flags()) - - logger.Infof("starting NFS webhook") - webhook := operator.NewWebhook(port, certDir) - err := webhook.Run() - rook.TerminateOnError(err, "failed to run wbhook") - - return nil -} diff --git a/cmd/rook/rook/rook.go b/cmd/rook/rook/rook.go index 055407e0b..d8d5046e4 100644 --- a/cmd/rook/rook/rook.go +++ b/cmd/rook/rook/rook.go @@ -25,12 +25,12 @@ import ( "github.com/coreos/pkg/capnslog" netclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" "github.com/pkg/errors" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - "github.com/rook/rook/pkg/util/flags" - "github.com/rook/rook/pkg/version" + rookclient "github.com/rook/cassandra/pkg/client/clientset/versioned" + "github.com/rook/cassandra/pkg/clusterd" + "github.com/rook/cassandra/pkg/operator/k8sutil" + "github.com/rook/cassandra/pkg/util/exec" + "github.com/rook/cassandra/pkg/util/flags" + "github.com/rook/cassandra/pkg/version" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/tevino/abool" @@ -57,7 +57,7 @@ var ( operatorImage string serviceAccountName string Cfg = &Config{} - logger = capnslog.NewPackageLogger("github.com/rook/rook", "rookcmd") + logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "rookcmd") ) type Config struct { @@ -102,10 +102,9 @@ func NewContext() *clusterd.Context { var err error context := &clusterd.Context{ - Executor: &exec.CommandExecutor{}, - NetworkInfo: clusterd.NetworkInfo{}, - ConfigDir: k8sutil.DataDir, - LogLevel: Cfg.LogLevel, + Executor: &exec.CommandExecutor{}, + ConfigDir: k8sutil.DataDir, + LogLevel: Cfg.LogLevel, } // Try to read config from in-cluster env diff --git a/cmd/rook/util/cmdreporter.go b/cmd/rook/util/cmdreporter.go index 9137b9f56..c10854e01 100644 --- a/cmd/rook/util/cmdreporter.go +++ b/cmd/rook/util/cmdreporter.go @@ -18,9 +18,10 @@ package util import ( "fmt" - "github.com/rook/rook/pkg/daemon/util" - "github.com/rook/rook/cmd/rook/rook" + "github.com/rook/cassandra/pkg/daemon/util" + + "github.com/rook/cassandra/cmd/rook/rook" "github.com/spf13/cobra" ) diff --git a/cmd/rook/util/copybins.go b/cmd/rook/util/copybins.go index 187b33c92..a8f57d352 100644 --- a/cmd/rook/util/copybins.go +++ b/cmd/rook/util/copybins.go @@ -19,8 +19,8 @@ package util import ( "fmt" - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/daemon/util" + "github.com/rook/cassandra/cmd/rook/rook" + "github.com/rook/cassandra/pkg/daemon/util" "github.com/spf13/cobra" ) diff --git a/cmd/rook/version/version.go b/cmd/rook/version/version.go index 3620de74e..5811eef48 100644 --- a/cmd/rook/version/version.go +++ b/cmd/rook/version/version.go @@ -20,7 +20,7 @@ import ( "fmt" "runtime" - "github.com/rook/rook/pkg/version" + "github.com/rook/cassandra/pkg/version" "github.com/spf13/cobra" ) diff --git a/cmd/rookflex/cmd/expander.go b/cmd/rookflex/cmd/expander.go deleted file mode 100644 index 3553e29ae..000000000 --- a/cmd/rookflex/cmd/expander.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cmd - -import ( - "encoding/json" - "fmt" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/spf13/cobra" - "os/exec" - "strconv" -) - -var ( - expandFSCmd = &cobra.Command{ - Use: "expandfs", - Short: "Expands the size of pod filesystem", - RunE: handleExpandFs, - } -) - -func init() { - RootCmd.AddCommand(expandFSCmd) -} - -func handleExpandFs(cmd *cobra.Command, args []string) error { - var opts = &flexvolume.ExpandOptions{} - - err := json.Unmarshal([]byte(args[0]), opts) - if err != nil { - return fmt.Errorf("could not parse options for expand %s, got %+v", args[1], err) - } - client, err := getRPCClient() - if err != nil { - return fmt.Errorf("error getting RPC client: %+v", err) - } - - size, err := strconv.ParseUint(args[3], 10, 64) - if err != nil { - return fmt.Errorf("error while decoding RBD size: %+v", err) - } - - err = client.Call("Controller.Expand", flexvolume.ExpandArgs{ExpandOptions: opts, Size: size}, nil) - if err != nil { - return fmt.Errorf("error while resizing RBD: %+v", err) - } - - var command *exec.Cmd - switch opts.FsType { - case "ext3", "ext4": - // #nosec G204 Rook controls the input to the exec arguments - command = exec.Command("resize2fs", args[2]) - case "xfs": - // #nosec G204 Rook controls the input to the exec arguments - command = exec.Command("xfs_growfs", "-d", args[2]) - default: - log(client, fmt.Sprintf("resize is not supported for fs: %s", opts.FsType), false) - return nil - } - err = command.Run() - if err != nil { - return fmt.Errorf("error resizing FS: %+v", err) - } - return nil -} diff --git a/cmd/rookflex/cmd/init.go b/cmd/rookflex/cmd/init.go deleted file mode 100644 index 91bd56e1b..000000000 --- a/cmd/rookflex/cmd/init.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
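The removed rookflex expandfs handler grows the filesystem after the RBD is resized by shelling out to the tool that matches the volume's FsType. The sketch below mirrors that switch; growFilesystem and the sample device path are illustrative, and unsupported filesystems are skipped rather than treated as errors, as in the deleted code.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// growFilesystem mirrors the switch in the deleted handleExpandFs: ext3/ext4
// are grown with resize2fs, xfs with xfs_growfs, and anything else is skipped
// because online resize is not supported for it here.
func growFilesystem(fsType, path string) error {
	var cmd *exec.Cmd
	switch fsType {
	case "ext3", "ext4":
		cmd = exec.Command("resize2fs", path)
	case "xfs":
		cmd = exec.Command("xfs_growfs", "-d", path)
	default:
		fmt.Printf("resize is not supported for fs: %s\n", fsType)
		return nil
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("error resizing FS: %w", err)
	}
	return nil
}

func main() {
	// Illustrative arguments; the flexvolume driver received these from kubelet.
	if err := growFilesystem("ext4", "/dev/rbd0"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}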
-*/ - -package cmd - -import ( - "os" - "path/filepath" - - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/spf13/cobra" -) - -var ( - initCmd = &cobra.Command{ - Use: "init", - Short: "Initialize the volume plugin", - RunE: initPlugin, - } -) - -func init() { - RootCmd.AddCommand(initCmd) -} - -func initPlugin(cmd *cobra.Command, args []string) error { - executable, err := os.Executable() - if err != nil { - return err - } - settings := flexvolume.LoadFlexSettings(filepath.Dir(executable)) - if _, err := os.Stdout.WriteString(string(settings)); err != nil { - return err - } - os.Exit(0) - return nil -} diff --git a/cmd/rookflex/cmd/mount.go b/cmd/rookflex/cmd/mount.go deleted file mode 100644 index 6f9421ca7..000000000 --- a/cmd/rookflex/cmd/mount.go +++ /dev/null @@ -1,362 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cmd - -import ( - "encoding/json" - "fmt" - "net/rpc" - "os" - "path" - "strconv" - "strings" - "syscall" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/util/version" - k8smount "k8s.io/utils/mount" -) - -const ( - mdsNamespaceKernelSupport = "4.7" -) - -var ( - mountCmd = &cobra.Command{ - Use: "mount", - Short: "Mounts the volume to the pod volume", - RunE: handleMount, - } -) - -func init() { - RootCmd.AddCommand(mountCmd) -} - -func handleMount(cmd *cobra.Command, args []string) error { - client, err := getRPCClient() - if err != nil { - return fmt.Errorf("Rook: Error getting RPC client: %v", err) - } - - var opts = &flexvolume.AttachOptions{} - if err = json.Unmarshal([]byte(args[1]), opts); err != nil { - return fmt.Errorf("Rook: Could not parse options for mounting %s. Got %v", args[1], err) - } - opts.MountDir = args[0] - - if opts.FsType == cephFS { - return mountCephFS(client, opts) - } - - err = client.Call("Controller.GetAttachInfoFromMountDir", opts.MountDir, &opts) - if err != nil { - log(client, fmt.Sprintf("Attach volume %s/%s failed: %v", opts.BlockPool, opts.Image, err), true) - return fmt.Errorf("Rook: Mount volume failed: %v", err) - } - - // Attach volume to node - devicePath, err := attach(client, opts) - if err != nil { - return err - } - - // construct the input we'll need to get the global mount path - driverDir, err := getDriverDir() - if err != nil { - return err - } - globalMountPathInput := flexvolume.GlobalMountPathInput{ - VolumeName: opts.VolumeName, - DriverDir: driverDir, - } - - // Get global mount path - var globalVolumeMountPath string - err = client.Call("Controller.GetGlobalMountPath", globalMountPathInput, &globalVolumeMountPath) - if err != nil { - log(client, fmt.Sprintf("Attach volume %s/%s failed. Cannot get global volume mount path: %v", opts.BlockPool, opts.Image, err), true) - return fmt.Errorf("Rook: Mount volume failed. 
Cannot get global volume mount path: %v", err) - } - - mounter := getMounter() - // Mount the volume to a global volume path - err = mountDevice(client, mounter, devicePath, globalVolumeMountPath, opts) - if err != nil { - return err - } - - // Mount the global mount path to pod mount dir - err = mount(client, mounter, globalVolumeMountPath, opts) - if err != nil { - return err - } - log(client, fmt.Sprintf("volume %s/%s has been attached and mounted", opts.BlockPool, opts.Image), false) - setFSGroup(client, opts) - return nil -} - -func attach(client *rpc.Client, opts *flexvolume.AttachOptions) (string, error) { - - log(client, fmt.Sprintf("calling agent to attach volume %s/%s", opts.BlockPool, opts.Image), false) - var devicePath string - err := client.Call("Controller.Attach", opts, &devicePath) - if err != nil { - log(client, fmt.Sprintf("Attach volume %s/%s failed: %v", opts.BlockPool, opts.Image, err), true) - return "", fmt.Errorf("Rook: Mount volume failed: %v", err) - } - return devicePath, err -} - -func mountDevice(client *rpc.Client, mounter *k8smount.SafeFormatAndMount, devicePath, globalVolumeMountPath string, opts *flexvolume.AttachOptions) error { - notMnt, err := mounter.Interface.IsLikelyNotMountPoint(globalVolumeMountPath) - if err != nil { - if os.IsNotExist(err) { - if err = os.MkdirAll(globalVolumeMountPath, 0750); err != nil { - return fmt.Errorf("Rook: Mount volume failed. Cannot create global volume mount path dir: %v", err) - } - notMnt = true - } else { - return fmt.Errorf("Rook: Mount volume failed. Error checking if %s is a mount point: %v", globalVolumeMountPath, err) - } - } - options := []string{opts.RW} - if notMnt { - err = redirectStdout( - client, - func() error { - if err = mounter.FormatAndMount(devicePath, globalVolumeMountPath, opts.FsType, options); err != nil { - return fmt.Errorf("failed to mount volume %s [%s] to %s, error %v", devicePath, opts.FsType, globalVolumeMountPath, err) - } - return nil - }, - ) - if err != nil { - log(client, fmt.Sprintf("mount volume %s/%s failed: %v", opts.BlockPool, opts.Image, err), true) - if err := os.Remove(globalVolumeMountPath); err != nil { - log(client, fmt.Sprintf("failed to remove dir %s. %v", globalVolumeMountPath, err), false) - } - return err - } - log(client, - "Ignore error about Mount failed: exit status 32. Kubernetes does this to check whether the volume has been formatted. It will format and retry again. https://github.com/kubernetes/kubernetes/blob/release-1.7/pkg/util/mount/mount_linux.go#L360", - false) - log(client, fmt.Sprintf("formatting volume %v devicePath %v deviceMountPath %v fs %v with options %+v", opts.VolumeName, devicePath, globalVolumeMountPath, opts.FsType, options), false) - } - return nil -} - -func mount(client *rpc.Client, mounter *k8smount.SafeFormatAndMount, globalVolumeMountPath string, opts *flexvolume.AttachOptions) error { - log(client, fmt.Sprintf("mounting global mount path %s on %s", globalVolumeMountPath, opts.MountDir), false) - // Perform a bind mount to the full path to allow duplicate mounts of the same volume. This is only supported for RO attachments. 
- options := []string{opts.RW, "bind"} - err := redirectStdout( - client, - func() error { - err := mounter.Interface.Mount(globalVolumeMountPath, opts.MountDir, "", options) - if err != nil { - notMnt, mntErr := mounter.Interface.IsLikelyNotMountPoint(opts.MountDir) - if mntErr != nil { - return fmt.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - } - if !notMnt { - if mntErr = mounter.Interface.Unmount(opts.MountDir); mntErr != nil { - return fmt.Errorf("Failed to unmount: %v", mntErr) - } - notMnt, mntErr := mounter.Interface.IsLikelyNotMountPoint(opts.MountDir) - if mntErr != nil { - return fmt.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - } - if !notMnt { - // This is very odd, we don't expect it. We'll try again next sync loop. - return fmt.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop", opts.MountDir) - } - } - if err := os.Remove(opts.MountDir); err != nil { - log(client, fmt.Sprint("failed to remove dir", err), true) - } - return fmt.Errorf("failed to mount volume %s to %s, error %v", globalVolumeMountPath, opts.MountDir, err) - } - return nil - }, - ) - if err != nil { - log(client, fmt.Sprintf("mount volume %s/%s failed: %v", opts.BlockPool, opts.Image, err), true) - } - return err -} - -func mountCephFS(client *rpc.Client, opts *flexvolume.AttachOptions) error { - if opts.FsName == "" { - return errors.New("Rook: Attach filesystem failed: Filesystem name is not provided") - } - - log(client, fmt.Sprintf("mounting ceph filesystem %s on %s", opts.FsName, opts.MountDir), false) - - if opts.ClusterNamespace == "" { - if opts.ClusterName == "" { - return fmt.Errorf("Rook: Attach filesystem %s failed: cluster namespace is not provided", opts.FsName) - } - opts.ClusterNamespace = opts.ClusterName - } - - // Get client access info - var clientAccessInfo flexvolume.ClientAccessInfo - err := client.Call("Controller.GetClientAccessInfo", []string{opts.ClusterNamespace, opts.PodNamespace, opts.MountUser, opts.MountSecret}, &clientAccessInfo) - if err != nil { - errorMsg := fmt.Sprintf("Attach filesystem %s on cluster %s failed: %v", opts.FsName, opts.ClusterNamespace, err) - log(client, errorMsg, true) - return fmt.Errorf("Rook: %v", errorMsg) - } - - // If a path has not been provided, just use the root of the filesystem. - // otherwise, ensure that the provided path starts with the path separator char. - path := string(os.PathSeparator) - if opts.Path != "" { - path = opts.Path - if !strings.HasPrefix(path, string(os.PathSeparator)) { - path = string(os.PathSeparator) + path - } - } - - options := []string{ - fmt.Sprintf("name=%s", clientAccessInfo.UserName), - fmt.Sprintf("secret=%s", clientAccessInfo.SecretKey), - } - - // Get kernel version - var kernelVersion string - err = client.Call("Controller.GetKernelVersion", struct{}{} /* no inputs */, &kernelVersion) - if err != nil { - log(client, fmt.Sprintf("WARNING: The node kernel version cannot be detected. The kernel version has to be at least %s in order to specify a filesystem namespace."+ - " If you have multiple ceph filesystems, the result could be inconsistent", mdsNamespaceKernelSupport), false) - } else { - kernelVersionParsed, err := version.ParseGeneric(kernelVersion) - if err != nil { - log(client, fmt.Sprintf("WARNING: The node kernel version %s cannot be parsed. 
The kernel version has to be at least %s in order to specify a filesystem namespace."+ - " If you have multiple ceph filesystems, the result could be inconsistent", kernelVersion, mdsNamespaceKernelSupport), false) - } else { - if kernelVersionParsed.AtLeast(version.MustParseGeneric(mdsNamespaceKernelSupport)) { - options = append(options, fmt.Sprintf("mds_namespace=%s", opts.FsName)) - } else { - log(client, - fmt.Sprintf("WARNING: The node kernel version is %s, which do not support multiple ceph filesystems. "+ - "The kernel version has to be at least %s. If you have multiple ceph filesystems, the result could be inconsistent", - kernelVersion, mdsNamespaceKernelSupport), false) - } - } - } - - devicePath := fmt.Sprintf("%s:%s", strings.Join(clientAccessInfo.MonAddresses, ","), path) - - log(client, fmt.Sprintf("mounting ceph filesystem %s on %s to %s", opts.FsName, devicePath, opts.MountDir), false) - mounter := getMounter() - err = redirectStdout( - client, - func() error { - - notMnt, err := mounter.Interface.IsLikelyNotMountPoint(opts.MountDir) - if err != nil && !os.IsNotExist(err) { - return err - } - if !notMnt { - // Directory is already mounted - return nil - } - if err := os.MkdirAll(opts.MountDir, 0750); err != nil { - return errors.Wrap(err, "failed to create dir") - } - - err = mounter.Interface.Mount(devicePath, opts.MountDir, cephFS, options) - if err != nil { - // cleanup upon failure - if err := k8smount.CleanupMountPoint(opts.MountDir, mounter.Interface, false); err != nil { - log(client, fmt.Sprint("failed to cleanup mount point", err), true) - } - return fmt.Errorf("failed to mount filesystem %s to %s with monitor %s and options %v: %+v", opts.FsName, opts.MountDir, devicePath, options, err) - } - return nil - }, - ) - if err != nil { - log(client, err.Error(), true) - } else { - log(client, fmt.Sprintf("ceph filesystem %s has been attached and mounted", opts.FsName), false) - } - - setFSGroup(client, opts) - return nil -} - -// setFSGroup will set the volume ownership to the fsGroup requested in the security context of the pod mounting the storage. -// If no fsGroup is specified, does nothing. -// If the operation fails, the error will be logged, but will not undo the mount. -// Follows the pattern set by the k8s volume ownership as found in -// https://github.com/kubernetes/kubernetes/blob/7f23a743e8c23ac6489340bbb34fa6f1d392db9d/pkg/volume/volume_linux.go#L38. -func setFSGroup(client *rpc.Client, opts *flexvolume.AttachOptions) { - if opts.FsGroup == "" { - return - } - - fsGroup, err := strconv.Atoi(opts.FsGroup) - if err != nil { - log(client, fmt.Sprintf("invalid fsgroup %s. %+v", opts.FsGroup, err), true) - return - } - - path := path.Join(opts.MountDir, opts.Path) - info, err := os.Stat(path) - if err != nil { - log(client, fmt.Sprintf("fsgroup: failed to stat path %s. %+v", path, err), true) - return - } - - stat, ok := info.Sys().(*syscall.Stat_t) - if !ok { - log(client, "fsgroup: failed to get stat", true) - return - } - - if stat == nil { - log(client, fmt.Sprintf("fsgroup: unexpected nil stat_t for path %s", path), true) - return - } - - err = os.Chown(path, int(stat.Uid), fsGroup) - if err != nil { - log(client, fmt.Sprintf("fsgroup: chown failed on %v. 
%v", path, err), true) - } - - rwMask := os.FileMode(0770) - roMask := os.FileMode(0550) - mask := rwMask - if opts.RW != "rw" { - mask = roMask - } - - mask |= os.ModeSetgid - - err = os.Chmod(path, mask) - if err != nil { - log(client, fmt.Sprintf("fsgroup: chmod failed on %s: %+v", path, err), true) - return - } - - log(client, fmt.Sprintf("successfully set fsgroup to %d", fsGroup), false) -} diff --git a/cmd/rookflex/cmd/root.go b/cmd/rookflex/cmd/root.go deleted file mode 100644 index c947c16f4..000000000 --- a/cmd/rookflex/cmd/root.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cmd - -import ( - "bytes" - "fmt" - "io" - "net" - "net/rpc" - "os" - "path" - - k8sexec "k8s.io/utils/exec" - k8smount "k8s.io/utils/mount" - - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/spf13/cobra" -) - -const ( - cephFS = "ceph" -) - -// RootCmd the rookflex volume plugin cobra root command -var RootCmd = &cobra.Command{ - Use: "rookflex", - Short: "Rook Flex volume plugin", - SilenceErrors: true, - SilenceUsage: true, -} - -func getRPCClient() (*rpc.Client, error) { - driverDir, err := getDriverDir() - if err != nil { - return nil, err - } - - unixSocketFile := path.Join(driverDir, flexvolume.UnixSocketName) // /usr/libexec/kubernetes/kubelet-plugins/volume/exec/rook.io~rook/rook/.rook.sock - conn, err := net.Dial("unix", unixSocketFile) - if err != nil { - return nil, fmt.Errorf("error connecting to socket %s: %+v", unixSocketFile, err) - } - return rpc.NewClient(conn), nil -} - -func getDriverDir() (string, error) { - ex, err := os.Executable() - if err != nil { - return "", fmt.Errorf("error getting path of the Rook flexvolume driver: %v", err) - } - - return path.Dir(ex), nil -} - -func getMounter() *k8smount.SafeFormatAndMount { - return &k8smount.SafeFormatAndMount{ - Interface: k8smount.New("" /* default mount path */), - Exec: k8sexec.New(), - } -} - -func log(client *rpc.Client, message string, isError bool) { - var log = &flexvolume.LogMessage{ - Message: message, - IsError: isError, - } - // nolint // #nosec G104 in this case we want the original errors - // to be returned in case of another failure - client.Call("Controller.Log", log, nil) -} - -// redirectStdout redirects the stdout for the fn function to the driver logger -func redirectStdout(client *rpc.Client, fn func() error) error { - // keep backup of the real stdout and stderr - oldStdout := os.Stdout - oldStderr := os.Stderr - - r, w, _ := os.Pipe() - os.Stdout = w - os.Stderr = w - - // restoring the real stdout and stderr - defer func() { - os.Stdout = oldStdout - os.Stderr = oldStderr - }() - - err := fn() - - if err := w.Close(); err != nil { - return err - } - - var buf bytes.Buffer - if _, err := io.Copy(&buf, r); err != nil { - return err - } - log(client, buf.String(), false) - return err -} diff --git a/cmd/rookflex/cmd/unmount.go b/cmd/rookflex/cmd/unmount.go deleted file mode 100644 index 801a25c7e..000000000 --- 
a/cmd/rookflex/cmd/unmount.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cmd - -import ( - "fmt" - "net/rpc" - "os/exec" - "strings" - - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/spf13/cobra" - k8smount "k8s.io/utils/mount" -) - -var ( - unmountCmd = &cobra.Command{ - Use: "unmount", - Short: "Unmounts the pod volume", - RunE: handleUnmount, - } -) - -func init() { - RootCmd.AddCommand(unmountCmd) -} - -func handleUnmount(cmd *cobra.Command, args []string) error { - - client, err := getRPCClient() - if err != nil { - return fmt.Errorf("Rook: Error getting RPC client: %v", err) - } - mountDir := args[0] - - log(client, fmt.Sprintf("unmounting mount dir: %s", mountDir), false) - - mounter := getMounter() - - // Check if it's a cephfs mount - // #nosec G204 Rook controls the input to the exec arguments - fstype, err := exec.Command("findmnt", "--nofsroot", "--noheadings", "--output", "FSTYPE", "--submounts", "--target", mountDir).Output() - if err != nil { - return fmt.Errorf("failed to retrieve filesystem type for path %q. %+v", mountDir, err) - } - if strings.Contains(string(fstype), "ceph") { - return unmountCephFS(client, mounter, mountDir) - } - - var opts = &flexvolume.AttachOptions{ - MountDir: args[0], - } - - err = client.Call("Controller.GetAttachInfoFromMountDir", opts.MountDir, &opts) - if err != nil { - log(client, fmt.Sprintf("Unmount volume at mount dir %s failed: %v", opts.MountDir, err), true) - return fmt.Errorf("Unmount volume at mount dir %s failed: %v", opts.MountDir, err) - } - - // construct the input we'll need to get the global mount path - driverDir, err := getDriverDir() - if err != nil { - return err - } - globalMountPathInput := flexvolume.GlobalMountPathInput{ - VolumeName: opts.VolumeName, - DriverDir: driverDir, - } - - var globalVolumeMountPath string - err = client.Call("Controller.GetGlobalMountPath", globalMountPathInput, &globalVolumeMountPath) - if err != nil { - log(client, fmt.Sprintf("Detach volume %s/%s failed. Cannot get global volume mount path: %v", opts.BlockPool, opts.Image, err), true) - return fmt.Errorf("Rook: Unmount volume failed. Cannot get global volume mount path: %v", err) - } - - safeToDetach := false - err = redirectStdout( - client, - func() error { - - // Unmount pod mount dir - if err := k8smount.CleanupMountPoint(opts.MountDir, mounter.Interface, false); err != nil { - return fmt.Errorf("failed to unmount volume at %s: %+v", opts.MountDir, err) - } - - // Remove attachment item from the CRD - err = client.Call("Controller.RemoveAttachmentObject", opts, &safeToDetach) - if err != nil { - log(client, fmt.Sprintf("Unmount volume %s failed: %v", opts.MountDir, err), true) - // Do not return error. Try detaching first. If error happens during detach, Kubernetes will retry. 
- } - - // If safeToDetach is true, then all attachment on this node has been removed - // Unmount global mount dir - if safeToDetach { - if err := k8smount.CleanupMountPoint(globalVolumeMountPath, mounter.Interface, false); err != nil { - return fmt.Errorf("failed to unmount volume at %s: %+v", opts.MountDir, err) - } - } - - return nil - }, - ) - if err != nil { - return err - } - - if safeToDetach { - // call detach - log(client, fmt.Sprintf("calling agent to detach mountDir: %s", opts.MountDir), false) - err = client.Call("Controller.Detach", opts, nil) - if err != nil { - log(client, fmt.Sprintf("Detach volume from %s failed: %v", opts.MountDir, err), true) - return fmt.Errorf("Rook: Unmount volume failed: %v", err) - } - log(client, fmt.Sprintf("volume has been unmounted and detached from %s", opts.MountDir), false) - } - log(client, fmt.Sprintf("volume has been unmounted from %s", opts.MountDir), false) - return nil -} - -func unmountCephFS(client *rpc.Client, mounter *k8smount.SafeFormatAndMount, mountDir string) error { - // Unmount pod mount dir - - err := redirectStdout( - client, - func() error { - // Unmount pod mount dir - if err := k8smount.CleanupMountPoint(mountDir, mounter.Interface, false); err != nil { - return fmt.Errorf("failed to unmount cephfs volume at %s: %+v", mountDir, err) - } - return nil - }, - ) - if err != nil { - log(client, fmt.Sprintf("failed to unmount cephfs volume from %s: %+v", mountDir, err), true) - } else { - log(client, fmt.Sprintf("cephfs volume has been unmounted from %s", mountDir), false) - } - return err -} diff --git a/cmd/rookflex/main.go b/cmd/rookflex/main.go deleted file mode 100644 index bc2a2a1a5..000000000 --- a/cmd/rookflex/main.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package main - -import ( - "encoding/json" - "flag" - "fmt" - "os" - "strings" - - "github.com/rook/rook/cmd/rookflex/cmd" -) - -type result struct { - Status string `json:"status"` - Message string `json:"message,omitempty"` -} - -func main() { - // workaround a k8s logging issue: https://github.com/kubernetes/kubernetes/issues/17162 - if err := flag.CommandLine.Parse([]string{}); err != nil { - panic(err) - } - - var r result - if err := cmd.RootCmd.Execute(); err != nil { - if strings.HasPrefix(err.Error(), "unknown command") { - r.Status = "Not supported" - } else { - r.Status = "Failure" - r.Message = err.Error() - } - } else { - r.Status = "Success" - } - reply(r) -} - -func reply(r result) { - code := 0 - if r.Status == "Failure" { - code = 1 - } - res, err := json.Marshal(r) - if err != nil { - fmt.Println(`{"status":"Failure","message":\"JSON error"}`) - } else { - fmt.Println(string(res)) - } - os.Exit(code) -} diff --git a/design/ceph/ac_design.png b/design/ceph/ac_design.png deleted file mode 100644 index 2e2c9d7d2..000000000 Binary files a/design/ceph/ac_design.png and /dev/null differ diff --git a/design/ceph/ac_dev_design.png b/design/ceph/ac_dev_design.png deleted file mode 100644 index 0bc409531..000000000 Binary files a/design/ceph/ac_dev_design.png and /dev/null differ diff --git a/design/ceph/admission-controller.md b/design/ceph/admission-controller.md deleted file mode 100644 index 76a3a580b..000000000 --- a/design/ceph/admission-controller.md +++ /dev/null @@ -1,148 +0,0 @@ -# Admission Controllers in Rook - -# Background - -This proposal is to add support for admission controllers in Rook. An admission controller is a piece of code that intercepts requests to the Kubernetes API server prior to persistence of the object, but after the request is authenticated and authorized - - There are two special controllers: MutatingAdmissionWebhook and ValidatingAdmissionWebhook. - - Mutating controllers may modify the objects they admit but validation controllers are only allowed to validate requests. - -Currently, user can manipulate Custom Resource specs with any values which may result in Rook not functioning as expected. The present validation method in Kubernetes is the OpenAPI schema validation which can be used for basic validations like checking type of data, providing a range for the values etc but anything more complex (checking resource availability, network status, error handling) would not be possible under this scenario. - -# Admission Controllers Concept - -![AC Image](ac_design.png) - -## Requirements - -1. Webhook server which will validate the requests -2. TLS certificates for the server, -3. ValidatingWebhookConfig/MutatingWebhookConfig which will intercept requests and send a HTTPS request to the webhook server. -4. RBAC Components - -## Implementation - -As shown in the above diagram, the admission control process proceeds in two phases. In the first phase, mutating admission controllers are run. In the second phase, validating admission controllers are run. Note again that some of the controllers are both. - -The admission controllers intercept requests based on the values given in the configuration. In this config, we have to provide the details on -* What resources should it be looking for ? (Pods, Service) -* What api version and group does it belong to ? *Example : ApiVersion = (v1, v1beta) Group version = (rook.ceph.io, admissionregistration.k8s.io)* -* What kind of operation should it intercept ? 
(Create, Update, Delete) -* A valid base64 encoded CA bundle. -* What path do we want to send with HTTPs request (/validate, /mutate) - -A webhook server should be in place (*with valid TLS certificates*) to intercept any HTTPs request that comes with the above path value. Once the request is intercepted by the server, an [AdmissionRequest](https://github.com/kubernetes/api/blob/master/admission/v1beta1/types.go#L40) object is sent through with the resource specifications. - -When the webhook server receives Admission Request, it will perform predefined validations on the provided resource values and send back an [AdmissionResponse](https://github.com/kubernetes/api/blob/master/admission/v1beta1/types.go#L116) with the indication whether request is accepted or rejected. - -If any of the controllers in either phase reject the request, the entire request is rejected immediately and an error is returned to the end-user. - -## Certificate Management - -### Development and Testing - -![Dev_AC_Image](./ac_dev_design.png) - -We can use self-signed certificates approved by the Kubernetes Certificate Authority for development purposes.This can be done by following the steps given below. -* Creating the private key and certs using openssl. - -**Sample : Generating the public and private keys** - -``` -openssl genrsa -out ${PRIVATE_KEY_NAME}.pem 2048 -openssl req -new -key ${PRIVATE_KEY_NAME}.pem -subj "/CN=${service}.${namespace}.svc" - -#Created after CSR is approved -openssl base64 -d -A -out ${PUBLIC_KEY_NAME}.pem -``` - -* Creating and sending a CSR to kubernetes for approval - -**Sample : Certificate Signing Request (CSR) in Kubernetes** -``` -apiVersion: certificates.k8s.io/v1beta1 -kind: CertificateSigningRequest -metadata: - name: ${csrName} -spec: - request: $(cat server.csr | base64 | tr -d '\n') - usages: - - digital signature - - key encipherment - - server auth -``` -Verify with -` kubectl get csr ${csrName}` - -**Sample : Approval of Signed Certificate** - -``` -kubectl certificate approve ${csrName} -``` - -If it is approved, then we can check the certificate with following command - -``` -$(kubectl get csr ${csrName} -o jsonpath='{.status.certificate}') -``` - -* Once approved, a generic secret will be created with the given public and private key which will be later mounted onto the server pod for use. - -**Sample : Creating a Secret in Kubernetes** -``` -kubectl create secret generic ${secret} \ - --from-file=key.pem=${PRIVATE_KEY_NAME}.pem \ - --from-file=cert.pem=${PUBLIC_KEY_NAME}.pem -``` -* Modifying the webhook config to inject CA bundle onto the ValidatingWebhookConfig -* All the above resources will be created in rook-ceph namespace - - -Using the above approach, the dev/admins will have the responsibility of rotating the certificates when they expire. - -Below is an excerpt of what a ValidatingWebhookConfig looks like - -``` -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: demo-webhook -webhooks: - - name: webhook-server.webhook-demo.svc - clientConfig: - service: - name: webhook-server - namespace: rook-ceph - path: "/validate" - caBundle: ${CA_PEM_B64} - rules: - - operations: [ "CREATE" ] - apiGroups: [""] - apiVersions: ["v1"] - resources: ["pods"] -``` - -We can make changes to the above values according to intercept based on whether a resource is being updated/deleted/created or change the type of resource or the request path which will be sent to the server. 
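
For concreteness, below is a minimal sketch of the `/validate` handler implied by the flow above. It is not taken from Rook's webhook server; it simply decodes the AdmissionReview that the API server POSTs to the configured path, runs a placeholder validation, and replies with an AdmissionResponse. The use of the admission/v1 types, the port, and the certificate mount paths are assumptions for illustration only.

```go
// Not Rook's actual webhook server: a self-contained sketch of a "/validate"
// handler matching the request/response flow described in this document.
package main

import (
	"encoding/json"
	"io/ioutil"
	"net/http"

	admissionv1 "k8s.io/api/admission/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func serveValidate(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	review := admissionv1.AdmissionReview{}
	if err := json.Unmarshal(body, &review); err != nil || review.Request == nil {
		http.Error(w, "could not decode AdmissionReview", http.StatusBadRequest)
		return
	}

	// Placeholder validation: a real server would inspect review.Request.Object
	// (the submitted spec) and set Allowed=false with a Result message to reject.
	review.Response = &admissionv1.AdmissionResponse{
		UID:     review.Request.UID,
		Allowed: true,
		Result:  &metav1.Status{Message: "validated " + review.Request.Kind.Kind},
	}

	resp, err := json.Marshal(review)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write(resp)
}

func main() {
	http.HandleFunc("/validate", serveValidate)
	// cert.pem/key.pem come from the secret mounted into the server pod;
	// the mount path shown here is illustrative.
	_ = http.ListenAndServeTLS(":8443", "/etc/webhook/certs/cert.pem", "/etc/webhook/certs/key.pem", nil)
}
```
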
- -## Setup in Rook - -### Starting the Webhook Server - - Based on the whether the secrets are present, the rook operator will deploy the relevant configuration files onto the cluster and start the server. - -The secrets will be volume mounted on the rook operator pod dynamically when they are detected. After the volumes are mounted, an HTTPs server would be started. - -Once the server starts, it will look for the appropriate tls key and crt files in the mounted volumes and start intercepting requests based on the path set in ValidatingWebhookConfig. - -### Error Handling - -If the server is unable to find valid certificates, It will not deploy any admission controller components onto the cluster and hence rook will continue to function normally as before. - -# References -1. https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ -1. https://github.com/kubernetes/api/blob/master/admission/v1beta1/types.go -1. https://kubernetes.io/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/ -1. https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/ - - diff --git a/design/ceph/ceph-cluster-cleanup.md b/design/ceph/ceph-cluster-cleanup.md deleted file mode 100644 index 251373a12..000000000 --- a/design/ceph/ceph-cluster-cleanup.md +++ /dev/null @@ -1,128 +0,0 @@ -# Ceph cluster clean up policy - -## Use case - -As a rook user, I want to clean up data on the hosts after I intentionally uninstall ceph cluster, so that I can start a new cluster without having to do any manual clean up. - -## Background - -### Cluster deletion -If the user deletes a rook-ceph cluster and wants to start a new cluster on the same hosts, then following manual steps should be performed: -- Delete the dataDirHostPath on each host. Otherwise, stale keys and other configs will remain from the previous cluster and the new mons will fail to start. -- Clean the OSD disks from the previous cluster before starting a new one. - -Read more about the manual clean up steps [here](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) - -This implementation aims to automate both of these manual steps. - -## Design Flow - -### User confirmation - -- **Important**: User confirmation is mandatory before cleaning up the data on hosts. This is important because user might have accidentally deleted the CR and in that case cleaning up the hostpath won’t recover the cluster. -- Adding these user confirmation on the ceph cluster would cause the operator to refuse running an orchestration - -### How to add user confirmation - -- If the user really wants to clean up the data on the cluster, then update the ceph cluster CRD with cleanupPolicy configuration like below : - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - image: quay.io/ceph/ceph:v16.2.5 - dataDirHostPath: /var/lib/rook - mon: - count: 3 - allowMultiplePerNode: true - storage: - useAllNodes: true - useAllDevices: true - cleanupPolicy: - confirmation: yes-really-destroy-data - sanitizeDisks: - method: quick - dataSource: zero - iteration: 1 - allowUninstallWithVolumes: false -``` - -- Updating the cluster `cleanupPolicy` with `confirmation: yes-really-destroy-data` would cause the operator to refuse running any further orchestration. - -### How the Operator cleans up the cluster - -- Operator starts the clean up flow only when deletionTimeStamp is present on the ceph Cluster. 
-- Operator checks for user confirmation (that is, `confirmation: yes-really-destroy-data`) on the ceph cluster before starting the clean up. -- Identify the nodes where ceph daemons are running. -- Wait till all the ceph daemons are destroyed on each node. This is important because deleting the data (say dataDirHostPath) before the daemons would cause the daemons to panic. -- Create a batch job that runs on each of the above nodes. -- The job performs the following action on each node based on the user confirmation: - - cleanup the cluster namespace on the dataDirHostPath. For example `/var/lib/rook/rook-ceph` - - Delete all the ceph monitor directories on the dataDirHostPath. For example `/var/lib/rook/mon-a`, `/var/lib/rook/mon-b`, etc. - - Sanitize the local disks used by OSDs on each node. -- Local disk sanitization can be further configured by the admin with following options: - - `method`: use `complete` to sanitize the entire disk and `quick` (default) to sanitize only ceph's metadata. - - `dataSource`: indicate where to get random bytes from to write on the disk. Possible choices are `zero` (default) or `random`. - Using random sources will consume entropy from the system and will take much more time then the zero source. - - `iteration`: overwrite N times instead of the default (1). Takes an integer value. -- If `allowUninstallWithVolumes` is `false` (default), then operator would wait for the PVCs to be deleted before finally deleting the cluster. - -#### Cleanup Job Spec: - -```yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: cluster-cleanup-job- - namespace: - labels: - app: rook-ceph-cleanup - rook-ceph-cleanup: "true" - rook_cluster: -spec: - template: - spec: - containers: - - name: rook-ceph-cleanup- - securityContext: - privileged: true - image: - env: - # if ROOK_DATA_DIR_HOST_PATH is available, then delete the dataDirHostPath - - name: ROOK_DATA_DIR_HOST_PATH - value: - - name: ROOK_NAMESPACE_DIR - value: - - name: ROOK_MON_SECRET - value: - - name: ROOK_CLUSTER_FSID - value: - - name: ROOK_LOG_LEVEL - value: - - name: ROOK_SANITIZE_METHOD - value: - - name: ROOK_SANITIZE_DATA_SOURCE - value: - - name: ROOK_SANITIZE_ITERATION - value: - args: []string{"ceph", "clean"} - volumeMounts: - - name: cleanup-volume - # data dir host path that needs to be cleaned up. - mountPath: - - name: devices - mountPath: /dev - volume: - - name: cleanup-volume - hostPath: - #directory location on the host - path: - - name: devices - hostpath: - path: /dev - restartPolicy: OnFailure -``` diff --git a/design/ceph/ceph-config-updates.md b/design/ceph/ceph-config-updates.md deleted file mode 100644 index 9400e5c66..000000000 --- a/design/ceph/ceph-config-updates.md +++ /dev/null @@ -1,257 +0,0 @@ -# Updating Rook's Ceph configuration strategy (Jan 2019) -**Targeted for v1.0-v1.1** - -## Background - -Starting with Ceph Mimic, Ceph is able to store the vast majority of config options for all daemons -in the Ceph mons' key-value store. This ability to centrally manage 99% of its configuration -is Ceph's preferred way of managing config. This conveniently allows Ceph to fit better into the -containerized application space than before. - -However, for it's own backwards compatibility, Ceph options set in the config file or via command -line flags will override the centrally-configured settings. To make the most of this functionality -within Ceph, it will be necessary to limit the configuration options specified by Rook in either -config files or on the command line to a minimum. 
- - -## Proposed new design - -The end goal of this work will be to allow Ceph to centrally manage its own configuration as much as -possible. Or in other terms, Rook will specify the barest minimum of configuration options in the -config file or on the command line with a priority on clearing the config file. - -### Minimal config file - -All Ceph options in the config file can be set via the command line, so it is possible to remove the -need for having a `ceph.conf` in containers at all. This is preferred over a config file, as it is -possible to inspect the entire config a daemon is started with by looking at the pod description. - -Some parts of the config file will have to be kept as long as Rook supports Ceph Luminous. See the -[supporting Luminous](#supporting-luminous) section below for more details. - -### Minimal command line options - -The minimal set of command line options is pared down to the settings which inform the daemon where -to find and/or store data. - -Required flags: -- `--fsid` is not required but is set to ensure daemons will not connect to the wrong cluster -- `--mon-host` is required to find mons, and when a pod (re)starts it must have the latest - information. This can be achieved by storing the most up-to-date mon host members in a Kubernetes - ConfigMap and setting this value a container environment variable mapped to the ConfigMap value. -- `--{public,cluster}-addr` are not required for most daemons but ... - - `--public-addr` and `--public-bind-addr` are necessary for mons - - `--public-addr` and `--cluster-addr` may be needed for osds -- `--keyring` may be necessary to inform daemons where to find the keyring which must be mounted to - a nonstandard directory by virtue of being sourced via a Kubernetes secret. - - The keyring could copied from the secret mount location to Ceph's default location with an init - container, and for some daemons, this may be necessary. This should not be done for the mons, - however, as the mon keyrings also include the admin keyring, and persisting the admin key to - disk should be avoided at all costs for security. - -Notable non-required flags: -- `--{mon,osd,mgr,mds,rgw}-data-dir` settings exist for all daemons, but it is more desirable to use - the `/var/lib/ceph//ceph-` directory for daemon data within containers. If - possible, mapping the `dataDirHostPath/` path on hosts to this default - location in the container is preferred. - - Note that currently, `dataDirHostPath` is mapped directly to containers, meaning that each - daemon container has access to other daemon containers' host-persisted data. Modifying Rook's - behavior to only mount the individual daemon's data dir into the container as proposed here will - be a small security improvement on the existing behavior. -- `--run-dir` exists for all daemons, but it is likewise more desirable to use the `/var/run/ceph` - path in containers. Additionally, this directory stores only unix domain sockets, and it does not - need to be persisted to the host. We propose to simply use the `/var/run/ceph` location in - containers for runtime storage of the data. - -### Additional configuration - -Additional configuration which Rook sets up initially should be done by setting values in Ceph's -centrally-stored config. A large group of additional configurations can be configured at once via -the Ceph command `ceph config assimilage-conf`. Care should be taken to make sure that Rook does not -modify preexisting user-specified values. 
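
As a rough illustration of relying on the centrally-stored config rather than a config file, the snippet below shells out to the Mimic+ `ceph config set` command for a few options of the kind discussed in this document. This is a sketch under stated assumptions, not Rook's implementation: the helper name and the chosen option/value pairs are illustrative, and a Luminous cluster (which has no central store) would instead need such settings on the command line or in a file.

```go
// Sketch only: pushing a handful of options into the mons' centrally-stored
// config (Mimic+) instead of writing them to ceph.conf.
package main

import (
	"fmt"
	"os/exec"
)

// configSet runs `ceph config set <who> <option> <value>`, where <who> is a
// section such as "global", "mon", or "osd.0".
func configSet(who, option, value string) error {
	// #nosec G204 -- arguments come from the operator, not from end users.
	out, err := exec.Command("ceph", "config", "set", who, option, value).CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to set %s %s=%s: %v (output: %s)", who, option, value, err, out)
	}
	return nil
}

func main() {
	// Illustrative values only; user-specified overrides should be detected
	// and left alone, as noted above.
	settings := []struct{ who, option, value string }{
		{"global", "mon_allow_pool_delete", "true"},
		{"global", "log_stderr_prefix", "debug "},
		{"osd.0", "debug_osd", "10"},
	}
	for _, s := range settings {
		if err := configSet(s.who, s.option, s.value); err != nil {
			fmt.Println(err)
		}
	}
}
```
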
- -In the initial version of this implementation, Rook will set these values on every operator restart. -This may result in user configs being overwritten but will ensure the user is not able to render -Rook accidentally unusable. In the future, means of determining whether a user has specified a value -or whether Rook has specified it is desired which may mean a feature addition to Ceph. - -Existing global configs configured here: -- `mon allow pool delete = true` -- `fatal signal handlers = false` is configured here, but this could be a vestigial config from - Rook's old days that can be removed (some more research needed) -- `log stderr prefix = "debug "` should be set for all daemons to differentiate logging from auditing -- `debug ...` configs - -Removed configs: -- `mon_max_pg_per_osd = 1000` is a dangerous setting and should be removed regardless of whether - this proposal is accepted -- `log file = /dev/stderr` is set by default to keep with container standards and kept here if the - user needs to change this for debugging/testing -- `mon cluster log file = /dev/stderr` for `log file` reasons above -- `mon keyvaluedb = rocksdb` is not needed for Luminous+ clusters -- `filestore_omap_backend = rocksdb` is not needed for Luminous+ -- `osd pg bits = 11` set (if needed) using config override for testing or play clusters -- `osd pgp bits = 11` set (if needed) using config override for testing or play clusters -- `osd pool default size = 1` set (if needed) using config override for testing or play clusters -- `osd pool default min size = 1` set (if needed) using config override for testing or play clusters -- `osd pool default pg num = 100` set (if needed) using config override for testing or play clusters -- `osd pool default pgp num = 100` set (if needed) using config override for testing or play clusters -- `rbd_default_features = 3` kubernetes should support Ceph's default RBD features after k8s v1.8 - -#### Additional configs via user override - -Rook currently offers the option of a config override in a ConfigMap which users may modify after -the Ceph operator has started. We propose to keep the "spirit" of this functionality but change the -method of implementation, as the ConfigMap modification approach will be hard to integrate with the -final goal of eliminating the config file altogether. Instead, we propose to update the Ceph cluster -CRD to support setting and/or overriding values at the time of cluster creation. The proposed format -is below. - -```yaml -apiVersion: ceph.rook.io/v2alpha1 -kind: CephCluster -spec: - # ... - # For advanced users: - # 'config' adds or overrides values in the Ceph config at operator start time and when the cluster - # CRD is updated. Config changes are made in the mon's centralized config if it is available - # (Mimic+) so that the user may override them temporarily via Ceph's command line. For Luminous, - # the changes are set on the command line since the centralized config is not available, and - # temporary overrides will not be possible. - config: - # Each key in the 'config' section represents a config file section. 'global' is likely to be - # the only section which is modified; however, daemons can have their config overridden - # explicitly if desired. - # global will add/override config for all Ceph daemons - global: - # The below "osd_pool_default..." settings make the default pools created have no replication - # and should be removed for production clusters, as this could impact data fault tolerance. 
- osd_pool_default_size: 1 - # mon will add/override config for all mons - mon: - mon_cluster_log_file: "/dev/stderr" - # osd.0 will add/override config only for the osd with ID 0 (zero) - osd.0: - debug_osd: 10 - # ... -``` -**Note on the above:** all values under config are reported to Ceph as strings, but the yaml should -support integer values as well if at all possible - -As stated in the example yaml, above, the 'config' section adds or overrides values in the Ceph -config whenever the Ceph operator starts and whenever the user updates the cluster CRD. Ceph -Luminous does not have a centralized config, so the overrides from this section will have to be set -on the command line. For Ceph Mimic and above, the mons have a centralized config which will be used -to set/override configs. Therefore, for Mimic+ clusters, the user may temporarily override values -set here, and those values will be reset to the `spec:config` values whenever the Ceph operator is -restarted or the cluster CRD is updated. - -#### Additional configs for test/play environments - -Test (especially integration tests) may need to specify `osd pool default size = 1` and -`osd pool default min size = 1` to support running clusters with only one osd. Test environments -would have a means of doing this fairly easily using the config override capability. These values -should not be set to these low values for production clusters, as they may allow admins to create -their own pools which are not fault tolerant accidentally. - -There is an option to set these values automatically for clusters which run with only one osd or to -set this value for clusters with a number of osds less than the default programmatically within -Rook's operator; however, this adds an additional amount of code flow complexity which is -unnecessary except in the integration test environments or in minimal demo environments. A middle -ground proposed herein is to add `osd pool default {,min} size = 1` overrides to the example cluster -CRD so that users "just trying out Rook" still get today's easy experience but where they can be -easily removed for production clusters that should not run with potentially dangerous settings. - -### Changes to Ceph mons - -The current method of starting mons where `mon-a` has an initial member of `a`, `mon-b` initial -members `a b`, `mon-c` initial members `a b c`, etc. Has worked so far but could result in a race -condition. Mon cluster stability is important to Ceph, and it is critical for this PR that the mons' -centrally-stored config is stable, so we here note that this behavior should be fixed such that the -mon initial members are known before the first mon is bootstrapped to consider this proposal's work -completed. Practically speaking, this will merely require the mon services to be started and have -IP addresses before the mons are bootstrapped. - -Additionally, generating the monmap during mon daemon initialization is unnecessary if `--mon-host` -is set for the `ceph-mon --mkfs` command. - -Creating `/var/lib/ceph/mon-/data/kv_backend` is no longer necessary in Luminous and -can be removed. - - -## Planning changes - -This proposal herein makes the suggestion that the changes be done with a new PR for each daemon -starting with the mons, as the mons are most affected. After the mons are done, the remaining 4 -daemons can be done in parallel. 
- -Once all 5 daemons are complete, there will likely be a need to refactor the codebase to remove any -vestigial remnants of the old config design which have been left. It will also be a good time to -look for any additional opportunities to reduce code duplication by teasing repeated patterns out -into shared modules. - -Another option is to modify all 5 daemons such that support is focused on Luminous, and the final -clean-up stage could be a good time to introduce support for Mimic and its new centralized mon KV -all at once. - -### Supporting Luminous - -Luminous does not have the mon's centralized kv store for Ceph configs, so any config set in the mon -kv store should be set in the config file for Luminous, and users may override these values via -Rook's config override feature. - -### Secondary considerations - -The implementation of this work will naturally remove most of the need for Rook to modify Ceph -daemon configurations via its `config-init` code paths, so it will also be a good opportunity to -move all daemon logic into the operator process where possible. - - -## Appendix A - at-a-glance config changes compared to Rook's v0.9 Ceph config file -``` -NEW LOCATION ----------------- -REMOVED [global] -FLAG fsid = bd4e8c5b-80b8-47d5-9e39-460eccc09e62 -REMOVED run dir = /var/lib/rook/mon-c -FLAG AS NEEDED mon initial members = b c a -FLAG mon host = 172.24.191.50:6790,172.24.97.67:6790,172.24.123.44:6790 -MON KV log file = /dev/stderr -MON KV mon cluster log file = /dev/stderr -FLAG AS NEEDED public addr = 172.24.97.67 -FLAG AS NEEDED cluster addr = 172.16.2.122 -FLAG AS NEEDED public network = not currently used -FLAG AS NEEDED cluster network = not currently used -REMOVED mon keyvaluedb = rocksdb -MON KV mon_allow_pool_delete = true -REMOVED mon_max_pg_per_osd = 1000 -MON KV debug default = 0 -MON KV debug rados = 0 -MON KV debug mon = 0 -MON KV debug osd = 0 -MON KV debug bluestore = 0 -MON KV debug filestore = 0 -MON KV debug journal = 0 -MON KV debug leveldb = 0 -OVERRIDE filestore_omap_backend = rocksdb -OVERRIDE osd pg bits = 11 -OVERRIDE osd pgp bits = 11 -OVERRIDE osd pool default size = 1 -OVERRIDE osd pool default min size = 1 -OVERRIDE osd pool default pg num = 100 -OVERRIDE osd pool default pgp num = 100 -REMOVED rbd_default_features = 3 -MON KV / REMOVED? fatal signal handlers = false - -REMOVED [daemon.id] -FLAG AS NEEDED keyring = /var/lib/rook/mon-c/data/keyring -``` - -New location key: -``` - - REMOVED - removed entirely from the config - - FLAG - flag always set - - FLAG AS NEEDED - set as a command line flag if/when it is needed - - MON KV - store in the mon's central config (except for Luminous) - - OVERRIDE - removed but will need to be added in override for some scenarios (test/play) -``` diff --git a/design/ceph/ceph-csi-driver.md b/design/ceph/ceph-csi-driver.md deleted file mode 100644 index 6993cff0e..000000000 --- a/design/ceph/ceph-csi-driver.md +++ /dev/null @@ -1,144 +0,0 @@ -# Ceph CSI Driver Support -**Targeted for v0.9** - -## Background - -Container Storage Interface (CSI) is a set of specifications for container -orchestration frameworks to manage storage. The CSI spec abstracts common -storage features such as create/delete volumes, publish/unpublish volumes, -stage/unstage volumes, and more. It is currently at the 1.0 release. 
- -Kubernetes started to support CSI with alpha support in -[1.9](https://kubernetes.io/blog/2018/01/introducing-container-storage-interface/), -beta support in -[1.10](https://kubernetes.io/blog/2018/04/10/container-storage-interface-beta/), -and CSI 1.0 in [Kubernetes -1.13](https://kubernetes.io/blog/2018/12/03/kubernetes-1-13-release-announcement/). - -It is projected that CSI will be the only supported persistent storage driver -in the near feature. In-tree drivers such as Ceph RBD and CephFS will be replaced with their respective CSI drivers. - -## Ceph CSI Drivers Status - -There have been active Ceph CSI drivers developments since Kubernetes 1.9. -Both Ceph RBD and CephFS drivers can be found at -[ceph/ceph-csi](https://github.com/ceph/ceph-csi). Currently ceph-csi -supports both the CSI v0.3.0 spec and CSI v1.0 spec. - -* RBD driver. Currently, rbd CSI driver supports both krbd and rbd-nbd. There is a consideration to support other forms of TCMU based drivers. -* CephFS driver. Both Kernel CephFS and Ceph FUSE are supported. When `ceph-fuse` is installed on the CSI plugin container, it can be used to mount CephFS shares. - -There is also upstream Kubernetes work to [include these drivers for e2e tests](https://github.com/kubernetes/kubernetes/pull/67088). - -## Kubernetes CSI Driver Deployment - -Starting Kubernetes CSI driver brings up an [external-provisioner](https://github.com/kubernetes-csi/external-provisioner), an [external-attacher](https://github.com/kubernetes-csi/external-attacher), DaemonSet that runs the driver on the nodes, and optionally an [external-snapshotter](https://github.com/kubernetes-csi/external-snapshotter). - -For example, deploying a CephFS CSI driver consists of the following steps: -1. Creating a [RBAC for external provisioner](https://github.com/ceph/ceph-csi/blob/master/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml) and the [provisioner itself](https://github.com/ceph/ceph-csi/blob/master/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml). -2. Creating [RBAC for CSI driver](https://github.com/ceph/ceph-csi/blob/master/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml) and [driver DaemonSet](https://github.com/ceph/ceph-csi/blob/master/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml) -3. Creating Storage Classes for CSI provisioners. - -## Integration Plan - -The aim is to support CSI 1.0 in Rook as a beta with Rook release 1.0. In Rook -1.1 CSI support will be considered stable. - -### How can Rook improve CSI drivers reliability? - -Rook can ensure resources used by CSI drivers and their associated Storage Classes are created, protected, and updated. Specifically, when a CSI based Storage Class is created, the referenced Pools and Filesystems should exist by the time a PVC uses this Storage Class. Otherwise Rook should resolve missing resources to avoid PVC creation failure. Rook should prevent Pools or Filesystems that are still being used by PVCs from accidental removal. Similarly, when the resources, especially mon addresses, are updated, Rook should try to update the Storage Class as well. - -Rook Ceph Agent supports Ceph mon failover by storing Ceph cluster instead of Ceph mon addresses in Storage Classes. This gives the Agent the ability to retrieve most current mon addresses at mount time. RBD CSI driver also allows mon address stored in other Kubernetes API objects (currently in a Secret) than Storage Class. However, Rook Operator must be informed to update mon addresses in this object during mon failover/addition/removal. 
Either a new CRD object or a Storage Class label has to be created to help Operator be aware that a Ceph cluster is used by a CSI based Storage Class. When Operator updates mon addresses, the Secret referenced by the Storage Class must be updated as well to pick up the latest mon addresses. - -### Coexist with flex driver - -No changes to the current method of deploying Storage Classes with the flex -driver should be required. Eventually, the flex driver approach will be -deprecated and CSI will become the default method of working with Storage -Classes. At or around the time of flex driver deprecation Rook should provide a -method to upgrade/convert existing flex driver based provisioning with CSI -based provisioning. - -The behavior of the CSI integration must be complementary to the approach -currently taken by the flex volume driver. When CSI is managed through Rook it -should work with the existing Rook CRDs and aim to minimize the required -configuration parameters. - - -### Work with out-of-band CSI drivers deployment - -Rook generally has more information about the state of the cluster than the -static settings in the Storage Class and is more up-to-date than the system's -administrators. When so configured, Rook could additionally manage the -configuration of CSI not directly managed by Rook. - - -### Rook initiated CSI drivers deployment - -With the addition of CSI 1.0 support in Kubernetes 1.13 Rook should become a -fully-featured method of deploying Ceph-CSI aiming to minimize extra steps -needed to use CSI targeting the Rook managed Ceph cluster(s). Initially, this -would be an opt-in feature that requires Kubernetes 1.13. Supporting CSI -versions earlier than 1.0 will be a non-goal. - -Opting in to CSI should be simple and require very few changes from the -currently documented approach for deploying Rook. Configuring Rook to use CSI -should require changing only the default mechanism for interacting with the -storage classes. The standard deployment should include the needed RBAC files -for managing storage with Ceph-CSI. Rook should package and/or source all other -needed configuration files/templates. All other configuration must be defaulted -to reasonable values and only require changing if the user requires it. - -The following are plans to converge the user experience when choosing to use -CSI rather than the flex volume method: - -#### Ceph-CSI Requirements - -To manage CSI with Rook the following requirements are placed on the -ceph-csi project: -* Configuration parameters currently set in the Storage Class will be - configurable via secrets: - * Mons (done) - * Admin Id - * User Id -* The key "blockpool" will serve as an alias to "pool" - -To additionally minimize the required parameters in a Storage Class it may -require changes to create CSI instance secrets; secrets that are associated -with CSI outside of the storage class (see [ceph-csi -PR#244](https://github.com/ceph/ceph-csi/pull/224)). If this change is made -nearly no parameters will be directly required in the storage class. - -#### Rook Requirements - -To manage CSI with Rook the following requirements are place on Rook: -* Rook deployments must include all the needed RBAC rules to set up CSI -* Rook deploys all additional CSI components required to provision - and mount a volume -* Rook must be able to dynamically update the secrets used to configure - Ceph-CSI, including but not limited to the mons list. 
-* Users should not be required to deploy Rook differently when using - CSI versus flex except minimal steps to opt in to CSI -* When provisioning Ceph-CSI Rook must uniquely identify the - driver/provisioner name so that multiple CSI drivers or multiple Rook - instances within a (Kubernetes) cluster will not collide - - -### Future points of integration - -While not immediately required this section outlines a few improvements -that could be made to improve the Rook and CSI integration: - -#### Extend CephBlockPool and CephFilesystem CRDs to provision Storage Classes - -Extend CephBlockPool and CephFilesystem CRDs to automatically provision Storage -Classes when so configured. Instead of requiring an administrator to create a -CRD and a Storage Class, add metadata to the CRD such that Rook will -automatically create storage classes based on that additional metadata. - -#### Select Flex Provisioning or CSI based on CephCluster CRD - -Currently the code requires changing numerous parameters to enable CSI. This -document aims to change that to a single parameter. In the future it may be -desirable to make this more of a "runtime" parameter that could be managed in -the cluster CRD. diff --git a/design/ceph/ceph-external-cluster.md b/design/ceph/ceph-external-cluster.md deleted file mode 100644 index 85fb97bdc..000000000 --- a/design/ceph/ceph-external-cluster.md +++ /dev/null @@ -1,159 +0,0 @@ -# Rook and External Ceph Clusters - -Target version: 1.1 - -Rook was designed for storage consumption in the same Kubernetes cluster as the clients who are consuming the storage. However, this scenario is not always sufficient. - -Another common scenario is when Ceph is running in an “external” cluster from the clients. There are a number of reasons for this scenario: -- Centralized Ceph management in a single cluster with multiple Kubernetes clusters that need to consume storage. -- Customers already have a Ceph cluster running not in a K8s environment, likely deployed with Ansible, ceph-deploy, or even manually. They should be able to consume this storage from Kubernetes. -- Fully independent storage for another level of isolation from their K8s compute nodes. This scenario can technically also be accomplished in a single Kubernetes cluster through labels, taints, and tolerations. - -## Terminology - -| | | -|---|---| -| **Local** Cluster | The cluster where clients are running that have a need to connect to the Ceph storage. Must be a Kubernetes/OpenShift cluster. | -| **External** Cluster | The cluster where Ceph Mons, Mgr, OSDs, and MDS are running, which might have been deployed with Rook, Ansible, or any other method. | - -## Requirements - -Requirements for clients in the local cluster to connect to the external cluster include: -- At least one mon endpoint where the connection to the cluster can be established -- Admin keyring for managing the cluster -- Network connectivity from a local cluster to the mons, mgr, osds, and mds of the external cluster: - - mon: for the operator to watch the mons that are in quorum - - mon/osd: for client access - - mgr: for dashboard access - - mds: for shared filesystem access - -## Rook Ceph Operator - -When the Rook operator is started, initially it is not aware of any clusters. When the admin creates the operator, they will want to configure the operator differently depending on if they want to configure a local Rook cluster, or an external cluster. 
- -If external cluster management is required, the differences are: -- The Rook Discover DaemonSet would not be necessary. Its purpose is to detect local devices, which is only needed for OSD configuration. - - Side note: If a local cluster, the discover DaemonSet could be delayed starting until the first cluster is started. There is no need for the discovery until the first cluster is created. -- The Security Context Constraints (SCC) would not require all the privileges of a local cluster. These privileges are only required by mon and/or osd daemon pods, which are not running in the local cluster. - - `allowPrivilegedContainer` - - `allowHostDirVolumePlugin` - - `allowHostPID` - - `allowHostIPC` - - `allowHostPorts` - -## CSI Driver - -The CSI driver is agnostic of whether Ceph is running locally or externally. The core requirement of the CSI driver is the list of mons and the keyring with which to connect. This metadata is required whether the cluster is local or external. The Rook operator will need to keep this metadata updated throughout the lifetime of the CSI driver. - -The CSI driver will be installed and configured by the Rook operator, similarly to any Rook cluster. The advantages of this approach instead of a standalone ceph-csi for external clusters include: -- Provide a consistent experience across any Kubernetes/OpenShift deployment -- Rook can install, configure, and update the Ceph CSI driver. Admins don't have to worry about the CSI driver. - -Question: How would Rook behave in the case where the admin deployed ceph-csi standalone as well as Rook? It seems reasonable not to support this, although it's not clear if there would actually be conflicts between the two. - -The flex driver would also be agnostic of the cluster for the same reasons, but we won’t need to worry about the flex driver going forward. - -## Rook-Ceph Cluster CRD - -In order for Rook to provide the storage to clients in the local cluster, the CephCluster CRD will be created in order for the operator to provide local management of the external cluster. There are several differences needed for the operator to be aware of an external cluster. - -1. Before the CephCluster CRD is created, some metadata must be initialized in local configmaps/secrets to allow the local cluster to manage the external cluster. - * mon endpoint(s) and admin keyring -1. The mon, mgr, and osd daemons will not be managed by the local Rook operator. These daemons must be created and managed by the external cluster. -1. The operator will make a “best effort” to keep the list of mons updated. - * If the mons change in the external cluster, the list of mons must be updated in the local cluster. - * The operator will need to query the Ceph status periodically (perhaps every minute). If there is a change to the mons, the operator will update the local configmaps/secrets.\ - * If the local operator fails to see changes to the external mons, perhaps because it is down, the mon list could become stale. In that case, the admin will need to update the list similarly to how it was initialized when the local cluster was first created. - * The operator will update the cluster crd with the following status fields: - - Timestamp of the last successful time querying the mons - - Timestamp of the last attempt to query the mons - - Success/Failure message indicating the result of the last check - -The first bullet point above requires an extra manual configuration step by the cluster admin from what they need in a typical Rook cluster. 
The other items above will be handled automatically by the Rook operator. The extra step involves exporting metadata from the external cluster and importing it to the local cluster: -1. The admin creates a yaml file with the needed resources from the external cluster (ideally we would provide a helper script to help automate this task): - * Save the mon list and admin keyring -1. Load the yaml file into the local cluster - * `kubectl create -f ` - -The CephCluster CRD will have a new property “external” to indicate whether the cluster is external. If true, the local operator will implement the described behavior. -Other CRDs such as CephBlockPool, CephFilesystem, and CephObjectStore do not need this property since they all belong to the cluster and will effectively -inherit the external property. - -```yaml -kind: CephCluster -spec: - external: true -``` - -The mgr modules, including the dashboard, would be running in the external cluster. Any configuration that happens through the dashboard would depend on the orchestration modules in that external cluster. - -## Block Storage - -With the rook-ceph cluster created, the CSI driver integration will cover the Block (RWO) storage and no additional management is needed. - -### Pool - -When a pool CRD is created in the local cluster, the operator will create the pool in the external cluster. The pool settings will only be applied the first -time the pool is created and should be skipped thereafter. The ownership and lifetime of the pool will belong to the external cluster. -The local cluster should not apply pool settings to overwrite the settings defined in the external cluster. - -If the pool CRD is deleted from the local cluster, the pool will not be deleted in the external cluster. - -## Filesystem (MDS) - -A shared filesystem must only be created in the external cluster. Clients in the local cluster can connect to the MDS daemons in the external cluster. - -The same instance of CephFS cannot have MDS daemons in different clusters. The MDS daemons must exist in the same cluster for a given filesystem. -When the CephFilesystem CRD is created in the local cluster, Rook will ignore the request and print an error to the log. - -## Object Storage (RGW) - -An object store can be created that will start RGW daemons in the local cluster. -When the CephObjectStore CRD is created in the local cluster, the local Rook operator does the following: -1. Create the metadata and data pools in the external cluster (if they don't exist yet) -1. Create a realm, zone, and zone group in the external cluster (if they don't exist yet) -1. Start the RGW daemon in the local cluster -1. Local s3 clients will connect to the local RGW endpoints - -Question: Should we generate a unique name so an object store of the same name cannot be shared with the external cluster? Or should we allow -sharing of the object store between the two clusters if the CRD has the same name? If the admin wants to create independent object stores, -they could simply create them with unique CRD names. - -Assuming the object store can be shared with the external cluster, similarly to pools, the owner of the object store is the external cluster. -If the local cluster attempts to change the pool settings such as replication, they will be ignored. - -## Monitoring (prometheus) - -Rook already creates and injects service monitoring configuration, consuming what the ceph-mgr prometheus exporter module generates. 
-
-This enables the capability of a Kubernetes cluster to gather metrics from the external cluster and feed them into Prometheus.
-
-The idea is to allow Rook-Ceph to connect to an external ceph-mgr prometheus module exporter.
-
-1. Enhance the external cluster script:
-   1. the script tries to discover the list of manager IP addresses
-   2. if provided by the user, the list of ceph-mgr IPs in the script is accepted via the new `--prometheus-exporter-endpoint` flag
-
-2. Add a new entry in the monitoring spec of the `CephCluster` CR:
-
-```go
-// ExternalMgrEndpoints point to existing Ceph prometheus exporter endpoints
-ExternalMgrEndpoints []v1.EndpointAddress `json:"externalMgrEndpoints,omitempty"`
-}
-```
-
-So the CephCluster CR will look like:
-
-```yaml
-monitoring:
-  # requires Prometheus to be pre-installed
-  enabled: true
-  externalMgrEndpoints:
-    - ip: "192.168.0.2"
-    - ip: "192.168.0.3"
-```
-
-3. Configure monitoring as part of the `configureExternalCephCluster()` method
-
-4. Create a new metrics Service
-
-5. Create an Endpoints resource based on the IP addresses either discovered or provided by the user in the script
diff --git a/design/ceph/ceph-managed-disruptionbudgets.md b/design/ceph/ceph-managed-disruptionbudgets.md
deleted file mode 100644
index 236009e74..000000000
--- a/design/ceph/ceph-managed-disruptionbudgets.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Handling node drains through managed PodDisruptionBudgets
-
-## Goals
-
-- Handle and block node drains that would cause data unavailability and loss.
-- Unblock drains dynamically so that a rolling upgrade is made possible.
-- Allow for rolling upgrades of nodes in automated Kubernetes environments like [cluster-api](https://github.com/kubernetes-sigs/cluster-api)
-
-
-
-## Design
-
-### OSDs
-
-OSDs do not fit under the single PodDisruptionBudget pattern. Ceph's ability to tolerate pod disruptions in one failure domain is dependent on the overall health of the cluster.
-Even if an upgrade agent were only to drain one node at a time, Ceph would have to wait until there were no undersized PGs before moving on to the next.
-
-The failure domain will be determined by the smallest failure domain of all the Ceph pools in that cluster.
-We begin by creating a single PodDisruptionBudget for all the OSDs with maxUnavailable=1. This allows one OSD to go down at any time. Once the user drains a node and an OSD goes down, we determine the failure domain of the draining OSD (using the OSD deployment labels). Then we create blocking PodDisruptionBudgets (maxUnavailable=0) for all other failure domains and delete the main PodDisruptionBudget. This blocks OSDs from going down in multiple failure domains simultaneously.
-
-Once the drained OSDs are back and all the PGs are active+clean, that is, the cluster is healed, the default PodDisruptionBudget (with maxUnavailable=1) is added back and the blocking ones are deleted. The user can also add a timeout for the PGs to become healthy. If the timeout is exceeded, the operator will ignore the PG health, add the main PodDisruptionBudget, and delete the blocking ones.
-
-Detecting drains is not easy as they are a client-side operation. The client cordons the node and continuously attempts to evict all pods from the node until it succeeds. Whenever an OSD goes into pending state, that is, its `ReadyReplicas` count is 0, we assume that some drain operation is happening.
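For illustration, the default and blocking budgets described above might look roughly like the following. This is a minimal sketch; the object names and label selectors are assumptions, not necessarily the exact objects Rook creates.

```yaml
# Default PDB: at most one OSD may be disrupted at a time across the whole cluster.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: rook-ceph-osd
  namespace: rook-ceph
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app: rook-ceph-osd
---
# Blocking PDB created for a healthy failure domain (e.g. zone "y") while zone "x" drains.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: rook-ceph-osd-zone-y
  namespace: rook-ceph
spec:
  maxUnavailable: 0
  selector:
    matchLabels:
      app: rook-ceph-osd
      failure-domain: y   # assumed label key; in practice it depends on the OSD deployment labels
```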
-
-Example scenario:
-
-- Zone x
-  - Node a
-    - osd.0
-    - osd.1
-- Zone y
-  - Node b
-    - osd.2
-    - osd.3
-- Zone z
-  - Node c
-    - osd.4
-    - osd.5
-
-1. Rook Operator creates a single PDB that covers all OSDs with maxUnavailable=1.
-2. When the Rook Operator sees an OSD go down (for example, osd.0 goes down):
-   - Create a PDB for each failure domain (zones y and z) with maxUnavailable=0 where the OSD did *not* go down.
-   - Delete the original PDB that covers all OSDs.
-   - Now all remaining OSDs in zone x would be allowed to be drained.
-3. When Rook sees the OSDs are back up and all PGs are clean:
-   - Restore the PDB that covers all OSDs with maxUnavailable=1.
-   - Delete the PDBs (in zones y and z) where maxUnavailable=0.
-
-An example of an operator that will attempt to do rolling upgrades of nodes is the Machine Config Operator in OpenShift. Based on what I have seen in
-[SIG cluster lifecycle](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle), Kubernetes deployments based on the cluster-api approach will be
-a common way of deploying Kubernetes. This will also help prevent manual drains from accidentally disrupting storage.
-
-When a node is drained, we will also delay its DOWN/OUT process by placing a noout on that node. We will remove that noout after a timeout.
-
-An OSD can be down due to reasons other than a node drain, say, a disk failure. In such a situation, if the PGs are unhealthy, Rook will create blocking PodDisruptionBudgets on the other failure domains to prevent further node drains on them. The `noout` flag won't be set on the node in this case. If the OSD is down but all the PGs are `active+clean`, the cluster will be treated as fully healthy. The default PodDisruptionBudget (with maxUnavailable=1) will be added back and the blocking ones will be deleted.
-
-### Mon, Mgr, MDS, RGW, RBDMirror
-
-Since there is no strict failure domain requirement for each of these, and they are not logically grouped, a static PDB will suffice.
-
-A single PodDisruptionBudget is created and owned by the respective controllers, and updated only according to changes in the CRDs that change the number of pods.
-
-E.g.: for a 3 mon configuration, we can have a PDB with the same labelSelector as the Deployment and maxUnavailable set to 1.
-If the mon count is increased to 5, we can replace it with a PDB that has maxUnavailable set to 2.
diff --git a/design/ceph/ceph-mon-pv.md b/design/ceph/ceph-mon-pv.md
deleted file mode 100644
index 8599fe415..000000000
--- a/design/ceph/ceph-mon-pv.md
+++ /dev/null
@@ -1,138 +0,0 @@
-# Ceph monitor PV storage
-
-**Target version**: Rook 1.1
-
-## Overview
-
-Currently all of the storage for Ceph monitors (data, logs, etc.) is provided
-using HostPath volume mounts. Supporting PV storage for Ceph monitors in
-environments with dynamically provisioned volumes (AWS, GCE, etc.) will allow
-monitors to migrate without requiring the monitor state to be rebuilt, and
-avoids the operational complexity of dealing with HostPath storage.
-
-The general approach taken in this design document is to augment the CRD with a
-persistent volume claim template describing the storage requirements of
-monitors. This template is used by Rook to dynamically create a volume claim for
-each monitor.
-
-## Monitor storage specification
-
-The monitor specification in the CRD is updated to include a persistent volume
-claim template that is used to generate PVCs for monitor database storage.
- -```go -type MonSpec struct { - Count int `json:"count"` - AllowMultiplePerNode bool `json:"allowMultiplePerNode"` - VolumeClaimTemplate *v1.PersistentVolumeClaim -} -``` - -The `VolumeClaimTemplate` is used by Rook to create PVCs for monitor storage. -The current set of template fields used by Rook when creating PVCs are -`StorageClassName` and `Resources`. Rook follows the standard convention of -using the default storage class when one is not specified in a volume claim -template. If the storage resource requirements are not specified in the claim -template, then Rook will use a default value. This is possible because unlike -the storage requirements of OSDs (xf: StorageClassDeviceSets), reasonable -defaults (e.g. 5-10 GB) exist for monitor daemon storage needs. - -*Logs and crash data*. The current implementation continues the use of a -HostPath volume based on `dataDirHostPath` for storing daemon log and crash -data. This is a temporary exception that will be resolved as we converge on an -approach that works for all Ceph daemon types. - -Finally, the entire volume claim template may be left unspecified in the CRD -in which case the existing HostPath mechanism is used for all monitor storage. - -## Upgrades and CRD changes - -When a new monitor is created it uses the _current_ storage specification found -in the CRD. Once a monitor has been created, it's backing storage is not -changed. This makes upgrades particularly simple because existing monitors -continue to use the same storage. - -Once a volume claim template is defined in the CRD new monitors will be created -with PVC storage. In order to remove old monitors based on HostPath storage -first define a volume claim template in the CRD and then fail over each monitor. - -## Clean up - -Like `StatefulSets` removal of an monitor deployment does not automatically -remove the underlying PVC. This is a safety mechanism so that the data is not -automatically destroyed. The PVCs can be removed manually once the cluster is -healthy. - -## Requirements and configuration - -Rook currently makes explicit scheduling decisions for monitors by using node -selectors to force monitor node affinity. This means that the volume binding -should not occur until the pod is scheduled onto a specific node in the cluster. -This should be done by using the `WaitForFirstConsumer` binding policy on the -storage class used to provide PVs to monitors: - -``` -kind: StorageClass -volumeBindingMode: WaitForFirstConsumer -``` - -When using existing HostPath storage or non-local PVs that can migrate (e.g. -network volumes like RBD or EBS) existing monitor scheduling will continue to -work as expected. However, because monitors are scheduled without considering -the set of available PVs, when using statically provisioned local volumes Rook -expects volumes to be available. Therefore, when using locally provisioned -volumes take care to ensure that each node has storage provisioned. - -Note that these limitations are currently imposed because of the explicit -scheduling implementation in Rook. These restrictions will be removed or -significantly relaxed once monitor scheduling is moved under control of Kubernetes -itself (ongoing work). - -## Scheduling - -In previous versions of Rook the operator made explicit scheduling (placement) -decisions when creating monitor deployments. These decisions were made by -implementing a custom scheduling algorithm, and using the pod node selector to -enforce the placement decision. 
Unfortunately, schedulers are difficult to -write correctly, and manage. Furthermore, by maintaining a separate scheduler -from Kubernetes global policies are difficult to achieve. - -Despite the benefits of using the Kubernetes scheduler, there are important use -cases for using a node selector on a monitor deployment: pinning a monitor to a -node when HostPath-based storage is used. In this case Rook must prevent k8s -from moving a pod away from the node that contains its storage. The node -selector is used to enforce this affinity. Unfortunately, node selector use is -mutually exclusive with kubernetes scheduling---a pod cannot be scheduled by -Kubernetes and then atomically have its affinity set to that placement decision. - -The workaround in Rook is to use a temporary _canary pod_ that is scheduled by -Kubernetes, but whose placement is enforced by Rook. The canary deployment is a -deployment configured identically to a monitor deployment, except the container -entrypoints have no side affects. The canary deployments are used to solve a -fundamental bootstrapping issue: we want to avoid making explicit scheduling -decisions in Rook, but in some configurations a node selector needs to be used -to pin a monitor to a node. - -### Health checks - -Previous versions of Rook performed periodic health checks that included checks -on monitor health as well as looking for scheduling violations. The health -checks related to scheduling violations have been removed. Fundamentally a -placement violation requires understanding or accessing the scheduling algorithm. - -The rescheduling or eviction aspects of Rook's scheduling caused more problems -than it helped, so going with K8s scheduling is the right thing. If/when K8s -has eviction policies in the future we could then make use of it (e.g. with -`*RequiredDuringExecution` variants of anti-affinity rules are available). - -### Target Monitor Count - -The `PreferredCount` feature has been removed. - -The CRD monitor count specifies a target minimum number of monitors to maintain. -Additionally, a preferred count is available which will be the desired number of -sufficient number of nodes are available. Unfortunately, this calculation -relies on determining if monitor pods may be placed on a node, requiring -knowledge of the scheduling policy and algorithm. The scenario to watch out for -is an endless loop in which the health check is determining a bad placement but -the k8s schedule thinks otherwise. diff --git a/design/ceph/ceph-nfs-ganesha.md b/design/ceph/ceph-nfs-ganesha.md deleted file mode 100644 index 6619e3443..000000000 --- a/design/ceph/ceph-nfs-ganesha.md +++ /dev/null @@ -1,182 +0,0 @@ -# Ceph NFS-Ganesha CRD - -[NFS-Ganesha] is a user space NFS server that is well integrated with [CephFS] -and [RGW] backends. It can export Ceph's filesystem namespaces and Object -gateway namespaces over NFSv4 protocol. - -Rook already orchestrates Ceph filesystem and Object store (or RGW) on -Kubernetes (k8s). It can be extended to orchestrate NFS-Ganesha server daemons -as highly available and scalable NFS gateway pods to the Ceph filesystem and -Object Store. This will allow NFS client applications to use the Ceph filesystem -and object store setup by rook. - -This feature mainly differs from the feature to add NFS as an another -storage backend for rook (the general NFS solution) in the following ways: - -* It will use the rook's Ceph operator and not a separate NFS operator to - deploy the NFS server pods. 
- -* The NFS server pods will be directly configured with CephFS or RGW - backend setup by rook, and will not require CephFS or RGW to be mounted - in the NFS server pod with a PVC. - -## Design of Ceph NFS-Ganesha CRD - -The NFS-Ganesha server settings will be exposed to Rook as a -Custom Resource Definition (CRD). Creating the nfs-ganesha CRD will launch -a cluster of NFS-Ganesha server pods that will be configured with no exports. - -The NFS client recovery data will be stored in a Ceph RADOS pool; and -the servers will have stable IP addresses by using [k8s Service]. -Export management will be done by updating a per-pod config file object -in RADOS by external tools and issuing dbus commands to the server to -reread the configuration. - -This allows the NFS-Ganesha server cluster to be scalable and highly available. - -### Prerequisites - -- A running rook Ceph filesystem or object store, whose namespaces will be - exported by the NFS-Ganesha server cluster. - e.g., - ``` - kubectl create -f cluster/examples/kubernetes/ceph/filesystem.yaml - ``` - -- An existing RADOS pool (e.g., CephFS's data pool) or a pool created with a - [Ceph Pool CRD] to store NFS client recovery data. - - -### Ceph NFS-Ganesha CRD - -The NFS-Ganesha CRD will specify the following: - -- Number of active Ganesha servers in the cluster - -- Placement of the Ganesha servers - -- Resource limits (memory, CPU) of the Ganesha server pods - -- RADOS pool and namespace where backend objects will be stored (supplemental - config objects and recovery backend objects) - - -Below is an example NFS-Ganesha CRD, `nfs-ganesha.yaml` - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephNFS -metadata: - # The name of Ganesha server cluster to create. It will be reflected in - # the name(s) of the ganesha server pod(s) - name: mynfs - # The namespace of the Rook cluster where the Ganesha server cluster is - # created. - namespace: rook-ceph -spec: - # NFS client recovery storage settings - rados: - # RADOS pool where NFS client recovery data and per-daemon configs are - # stored. In this example the data pool for the "myfs" filesystem is used. - # If using the object store example, the data pool would be - # "my-store.rgw.buckets.data". Note that this has nothing to do with where - # exported CephFS' or objectstores live. - pool: myfs-data0 - # RADOS namespace where NFS client recovery data and per-daemon configs are - # stored. - namespace: ganesha-ns - - # Settings for the ganesha server - server: - # the number of active ganesha servers - active: 3 - # where to run the nfs ganesha server - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - mds-node - # tolerations: - # - key: mds-node - # operator: Exists - # podAffinity: - # podAntiAffinity: - # The requests and limits set here allow the ganesha pod(s) to use half of - # one CPU core and 1 gigabyte of memory - resources: - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # the priority class to set to influence the scheduler's pod preemption - priorityClassName: -``` - -When the nfs-ganesha.yaml is created the following will happen: - -- Rook's Ceph operator sees the creation of the NFS-Ganesha CRD. - -- The operator creates as many [k8s Deployments] as the number of active - Ganesha servers mentioned in the CRD. Each deployment brings up a Ganesha - server pod, a replicaset of size 1. 
- -- The ganesha servers, each running in a separate pod, use a mostly-identical - ganesha config (ganesha.conf) with no EXPORT definitions. The end of the - file will have it do a %url include on a pod-specific RADOS object from which - it reads the rest of its config. - -- The operator creates a k8s service for each of the ganesha server pods - to allow each of the them to have a stable IP address. - -The ganesha server pods constitute an active-active high availability NFS -server cluster. If one of the active Ganesha server pods goes down, k8s brings -up a replacement ganesha server pod with the same configuration and IP address. -The NFS server cluster can be scaled up or down by updating the -number of the active Ganesha servers in the CRD (using `kubectl edit` or -modifying the original CRD and running `kubectl apply -f `). - -### Per-node config files -After loading the basic ganesha config from inside the container, the node -will read the rest of its config from an object in RADOS. This allows external -tools to generate EXPORT definitions for ganesha. - -The object will be named "conf-.", where metadata.name -is taken from the CRD and the index is internally generated. It will be stored -in `rados.pool` and `rados.namespace` from the above CRD. - -### Consuming the NFS shares - -An external consumer will fetch the ganesha server IPs by querying the k8s -services of the Ganesha server pods. It should have network access to the -Ganesha pods to manually mount the shares using a NFS client. Later, support -will be added to allow user pods to easily consume the NFS shares via PVCs. - -## Example use-case - -The NFS shares exported by rook's ganesha server pods can be consumed by -[OpenStack] cloud's user VMs. To do this, OpenStack's shared file system -service, [Manila] will provision NFS shares backed by CephFS using rook. -Manila's [CephFS driver] will create NFS-Ganesha CRDs to launch ganesha server -pods. The driver will dynamically add or remove exports of the ganesha server -pods based on OpenStack users' requests. The OpenStack user VMs will have -network connectivity to the ganesha server pods, and manually mount the shares -using NFS clients. - -[NFS-Ganesha]: https://github.com/nfs-ganesha/nfs-ganesha/wiki -[CephFS]: http://docs.ceph.com/docs/master/cephfs/nfs/ -[RGW]: http://docs.ceph.com/docs/master/radosgw/nfs/ -[Rook toolbox]: (/Documentation/ceph-toolbox.md) -[Ceph manager]: (http://docs.ceph.com/docs/master/mgr/) -[OpenStack]: (https://www.openstack.org/software/) -[Manila]: (https://wiki.openstack.org/wiki/Manila) -[CephFS driver]: (https://github.com/openstack/manila/blob/master/doc/source/admin/cephfs_driver.rst) -[k8s ConfigMaps]: (https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) -[k8s Service]: (https://kubernetes.io/docs/concepts/services-networking/service) -[Ceph Pool CRD]: (https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md) -[k8s Deployments]: (https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) diff --git a/design/ceph/ceph-openshift-fencing-mitigation.md b/design/ceph/ceph-openshift-fencing-mitigation.md deleted file mode 100644 index 1b0cbec0e..000000000 --- a/design/ceph/ceph-openshift-fencing-mitigation.md +++ /dev/null @@ -1,72 +0,0 @@ -# Handling Openshift Fencing through managed MachineDisruptionBudgets - -## Goals - -- Use the MachineDisruptionBudget to ensure that OCP 4.x style fencing does not cause data unavailability and loss. - -## What is fencing in OCP 4.x? 
-
-Openshift uses `Machines` and `MachineSets` from the [cluster-api](https://github.com/kubernetes-sigs/cluster-api) to dynamically provision nodes. Fencing is a remediation method that reboots/deletes `Machine` CRDs to solve problems with automatically provisioned nodes.
-
-Once the [MachineHealthCheck controller](https://github.com/openshift/machine-api-operator#machine-healthcheck-controller) detects that a node is `NotReady` (or some other configured condition), it will remove the associated `Machine`, which will cause the node to be deleted. The `MachineSet` controller will then replace the `Machine` via the machine-api. The exception is on baremetal platforms, where fencing will reboot the underlying `BareMetalHost` object instead of deleting the `Machine`.
-
-
-## Why can't we use `PodDisruptionBudget`?
-
-Fencing does not use the eviction API. It operates on `Machine`s, not `Pod`s.
-
-## Will we need to do large storage rebalancing after fencing?
-
-Hopefully not. On cloud platforms, the OSDs can be rescheduled on new nodes along with their backing PVs, and on baremetal, where the local PVs are tied to a node, fencing will simply reboot the node instead of destroying it.
-
-
-# Problem Statement
-We need to ensure that only one node can be fenced at a time and that Ceph is fully recovered (all PGs clean) before any fencing is initiated. The available pattern for limiting fencing is the MachineDisruptionBudget, which allows us to specify maxUnavailable. However, this won't be sufficient to ensure that Ceph has recovered before fencing is initiated, as MachineHealthCheck does not check anything other than the node state.
-
-Therefore, we will control how many nodes match the MDB by dynamically adding and removing labels as well as dynamically updating the MDB. By manipulating the MDB into a state where desiredHealthy > currentHealthy, we can disable fencing on the nodes the MDB points to.
-
-# Design:
-
-We will implement two controllers, the `machinedisruptionbudget-controller` and the `machine-controller`, following the controller pattern described [here](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg#hdr-Controller_Writing_Tips). Each controller watches a set of object kinds and reconciles one of them.
-
-The bottom line is that fencing is blocked if the PG state is not active+clean, but fencing continues on `Machine`s without the label that indicates OSD resources are running there.
-
-## machinedisruptionbudget-controller
-This controller watches Ceph PGs and CephClusters. We will ensure the reconciler is enqueued every 60s. It ensures that each CephCluster has an MDB created, and that the MDB's value of maxUnavailable reflects the health of the Ceph cluster's PGs.
-If all PGs are clean, maxUnavailable = 1.
-Else, maxUnavailable = 0.
-
-We can share a Ceph health cache with the other controller-runtime reconcilers that have to watch the PG "cleanliness".
-
-The MDB will target `Machine`s selected by a label maintained by the `machine-controller`. The label is `fencegroup.rook.io/`.
-
-## machine-controller
-This controller watches OSDs and `Machine`s. It ensures that each `Machine` with OSDs from a `CephCluster` has the label `fencegroup.rook.io/`, and that `Machine`s without running OSDs do not have the label.
-
-This will ensure that no `Machine` without running OSDs will be protected by the MDB.
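For illustration, the MDB maintained by the `machinedisruptionbudget-controller` might look roughly like the following. This is a sketch only: the apiVersion/group, namespace, and label value are assumptions and should be checked against the machine-api that is actually deployed.

```yaml
# Hypothetical MachineDisruptionBudget managed by the machinedisruptionbudget-controller.
apiVersion: machine.openshift.io/v1beta1   # assumed group/version
kind: MachineDisruptionBudget
metadata:
  name: rook-ceph-fence
  namespace: openshift-machine-api
spec:
  # 1 while all PGs are active+clean, flipped to 0 otherwise
  maxUnavailable: 1
  selector:
    matchLabels:
      # assumed label; maintained by the machine-controller on Machines that run OSDs
      fencegroup.rook.io/rook-ceph: ""
```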
- -## Assumptions: -- We assume that the controllers will be able to reconcile multiple times in < 5 minutes as we know that fencing will happen only after a configurable timeout. The default timeout is 5 minutes. - This is important because the MDB must be reconciled based on an accurate ceph health state in that time. - - -## Example Flows: - -**Node needs to be fenced, the OSDs on the node are down too** - - - Node has NotReady condition. - - Some Ceph PGs are not active+clean. - - machinedisruptionbudget-controller sets maxUnavailable to 0 on the MachineDisruptionBudget. - - MachineHealthCheck sees NotReady and attempts to fence after 5 minutes, but can't due to MDB - - machine-controller notices all OSDs on the affected node are down and removes the node from the MDB. - - MDB no longer covers the affected node, and MachineHealthCheck fences it. - -**Node needs to be fenced, but the OSDs on the node are up** - - - Node has NotReady condition. - - Ceph PGs are all active+clean so maxUnavailable remains 1 on the MDB. - - MachineHealthCheck fences the Node. - - Ceph resources on the node go down. - - Some Ceph PGs are now not active+clean. - - machinedisruptionbudget-controller sets maxUnavailble to 0 on the MachineDisruptionBudget. - - If another labeled node needs to be fenced, it will only happen after the Ceph PGs become active+clean again when the OSDs are rescheduled and backfilled. diff --git a/design/ceph/ceph-stretch-cluster.md b/design/ceph/ceph-stretch-cluster.md deleted file mode 100644 index a605613ce..000000000 --- a/design/ceph/ceph-stretch-cluster.md +++ /dev/null @@ -1,225 +0,0 @@ -# Stretch Cluster with an Arbiter - -Target version: 1.5 - -## Summary - -In a production environment it is generally expected to have three failure domains where three replicas -of the data are stored. If any individual failure domain goes down, the data is still available for -reads and writes in the other two failure domains. - -For environments that only have two failure domains available where data can be replicated, we need to -also support the case where one failure domain is lost and the data is still fully available in the -remaining failure domain. - -To support this scenario, Ceph has recently integrated support for stretch clusters with an arbiter mon -as seen in [this PR](https://github.com/ceph/ceph/pull/35906) and in the following design docs: -- [Stretch clusters](https://github.com/ceph/ceph/blob/master/doc/rados/operations/stretch-mode.rst) -- Mon election [strategies](https://github.com/ceph/ceph/blob/master/doc/rados/operations/change-mon-elections.rst) and [design](https://github.com/ceph/ceph/blob/master/doc/dev/mon-elections.rst) - -Rook will enable the stretch clusters when requested by the admin through the usual Rook CRDs. - -## Architecture - -To enable the stretch cluster based on the Ceph architecture: -- Rook requires three zones -- Two zones (A and B) will each run all types of Rook pods. We call these the "data" zones. - - Two mons run in each data zone for two reasons: - - The OSDs can only connect to the mon in their own zone so we need more than one mon in the data zones. - - A zone is considered down by Ceph when the mons in the zone are all unavailable. 
-- The third zone (Arbiter): - - Runs a single mon, called the "tiebreaker" or "arbiter" - - No other Rook or Ceph daemons will be run in the arbiter zone - -![Stretch Cluster](stretchcluster.png) - -The arbiter zone will commonly contain just a single node that is also a K8s master node, -although the arbiter zone may certainly contain more nodes. - -The type of failure domain used for stretch clusters is commonly "zone", but can be set to a different failure domain. - -### Latency - -Distributed systems are impacted by the network latency between critical components. -In a stretch cluster, the critical latency is in the Etcd servers configured with Kubernetes. -K8s only supports latency of up to 5ms (10ms round trip), which is lower than the latency -requirements for any Ceph components. Ceph mons can handle higher latency, designed for up to -700ms round trip. - -## Design - -### Domain Failure Configuration - -The topology of the K8s cluster is to be determined by the admin, outside the scope of Rook. -Rook will simply detect the topology labels that have been added to the nodes. - -If the desired failure domain is a "zone", the `topology.kubernetes.io/zone` label should -be added to the nodes. Any of the [topology labels](https://rook.io/docs/rook/master/ceph-cluster-crd.html#osd-topology) -supported by OSDs can be used. - -In the minimum configuration, two nodes in each data zone would be labeled, while one -node in the arbiter zone is required. - -For example: - -```yaml -topology.kubernetes.io/zone=a -topology.kubernetes.io/zone=a -topology.kubernetes.io/zone=b -topology.kubernetes.io/zone=b -topology.kubernetes.io/zone=arbiter -``` - -### Rook Cluster Design - -The core changes to rook are to associate the mons with the required zones. - -- Five mons are required. No other count of mons is supported for stretch clusters. -- One mon will be assigned to the arbiter zone -- Two mons will each be assigned to the data zones - - The two mons within each zone will have node antiaffinity - -The new configuration is found under the `stretchCluster` configuration where the -three zones must be listed and the arbiter zone is identified. - -```yaml - mon: - count: 5 - allowMultiplePerNode: false - stretchCluster: - # The cluster is most commonly stretched over zones, but could also be stretched over - # another failure domain such as datacenter or region. Must be one of the labels - # used by OSDs as documented at https://rook.io/docs/rook/master/ceph-cluster-crd.html#osd-topology. - failureDomainLabel: topology.kubernetes.io/zone - zones: - # There must be exactly three zones in this list, with one of them being the arbiter - - name: arbiter - arbiter: true - - name: a - - name: b -``` - -The operator will track which mons belong to which zone. The zone assignment for each mon -will be stored in the `rook-ceph-mon-endpoints` configmap in the same data structure where the host -assignment is stored for mons with node affinity. - -For example, if the zones are called -"arbiter", "zone1", and "zone2", the configmap would contain: - -``` -data: - data: a=10.99.109.200:6789,b=10.98.18.147:6789,c=10.96.86.248:6789,d=10.96.86.249:6789,e=10.96.86.250:6789 - mapping: '{"node":{"a":"arbiter","b":"zone1","c":"zone1","d":"zone2","e":"zone2"}}' - maxMonId: "4" -``` - -If no zones are listed in the `stretchCluster.zones`, it is not considered a stretch cluster. -If anything other than 3 zones, it would be an error and the cluster would not be configured. 
- -### Mon Failover - -Mon failover will replace a mon in the same zone where the previous mon failed. - -### Mon Storage - -The mons can all be backed by a host path or a PVC, similar to any other Rook cluster. - -In this example, all the mons will be backed by the same PVC: - -```yaml - mon: - count: 5 - allowMultiplePerNode: false - stretchCluster: - zones: - - name: arbiter - arbiter: true - - name: a - - name: b - volumeClaimTemplate: - spec: - storageClassName: gp2 - resources: - requests: - storage: 10Gi -``` - -It is possible that the mons will need to be backed by a different type of storage in different zones. -In that case, a volumeClaimTemplate specified under the zone will override the default storage setting for -the mon. - -For example, the arbiter could specify a different backing store in this manner: - -```yaml - mon: - count: 5 - allowMultiplePerNode: false - stretchCluster: - zones: - - name: arbiter - arbiter: true - volumeClaimTemplate: - spec: - storageClassName: alternative-storage - resources: - requests: - storage: 10Gi - - name: a - - name: b - volumeClaimTemplate: - spec: - storageClassName: gp2 - resources: - requests: - storage: 10Gi -``` - -Question: Is there a need for a dataDirHostPath to be specified for the mons in one zone -when the other mons are using the PVCs? For now, we assume all the mons are either using -a PVC or a host path, but not a mix. - -### Ceph Config - -Rook will set the mon election strategy to the new "connectivity" algorithm. - -```console -mon election default strategy: 3 -``` - -The mons will be configured so Ceph will associate the mons with the correct failure domains. -Rook will set each mon location to the zone name - -```console -$ ceph mon set_location -``` - -The stretch mode will be enabled with the command: - -```console -$ ceph mon enable_stretch_mode tiebreaker_mon new_crush_rule dividing_bucket zone -``` - -### Pools - -For data protection in the stretch cluster, all pools should be created with the following configuration, including -pools for an rbd storage class (CephBlockPool), shared filesystem (CephFilesystem), or object store (CephObjectStore). -- Replica: 4 -- Failure domain: zone -- Two replicas of data are stored in each zone using a special CRUSH rule specified by `replicasPerFailureDomain: 2` - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: stretchedreplica - namespace: rook-ceph -spec: - failureDomain: zone - replicated: - size: 4 - replicasPerFailureDomain: 2 -``` - -See [this issue](https://github.com/rook/rook/issues/5591) for more details on implementation of the CRUSH rule. - -Erasure coded pools are not supported currently in Ceph stretch clusters. diff --git a/design/ceph/ceph-volume-provisioning.md b/design/ceph/ceph-volume-provisioning.md deleted file mode 100644 index e600c301c..000000000 --- a/design/ceph/ceph-volume-provisioning.md +++ /dev/null @@ -1,143 +0,0 @@ -# ceph-volume OSD Provisioning - -**Targeted for v0.9** - -Provisioning OSDs today is done directly by Rook. This needs to be simplified and improved by building -on the functionality provided by the `ceph-volume` tool that is included in the ceph image. 
- -## Legacy Design - -As Rook is implemented today, the provisioning has a lot of complexity around: - -- Partitioning of devices for bluestore -- Partitioning and configuration of a `metadata` device where the WAL and DB are placed on a different device from the data -- Support for both directories and devices -- Support for bluestore and filestore - -Since this is mostly handled by `ceph-volume` now, Rook should replace its own provisioning code and rely on `ceph-volume`. - -## ceph-volume Design - -`ceph-volume` is a CLI tool included in the `ceph/ceph` image that will be used to configure and run Ceph OSDs. -`ceph-volume` will replace the OSD provisioning mentioned previously in the legacy design. - -At a high level this flow remains unchanged from the flow in the [one-osd-per-pod design](dedicated-osd-pod.md#create-new-osds). -No new jobs or pods need to be launched from what we have today. The sequence of events in the OSD provisioning will be the following. - -- The cluster CRD specifies what nodes/devices to configure with OSDs -- The operator starts a provisioning job on each node where OSDs are to be configured -- The provisioning job: - - Detects what devices should be configured - - Calls `ceph-volume lvm batch` to prepare the OSDs on the node. A single call is made with all of the devices unless more specific settings are included for LVM and partitions. - - Calls `ceph-volume lvm list` to retrieve the results of the OSD configuration. Store the results in a configmap for the operator to take the next step. -- The operator starts a deployment for each OSD that was provisioned. `rook` is the entrypoint for the container. - - The configmap with the osd configuration is loaded with info such as ID, FSID, bluestore/filestore, etc - - `ceph-volume lvm activate` is called to activate the osd, which mounts the config directory such as `/var/lib/ceph/osd-0`, using a tempfs mount. The OSD options such as `--bluestore`, `--filestore`, `OSD_ID`, and `OSD_FSID` are passed to the command as necessary. - - The OSD daemon is started with `ceph-osd` - - When `ceph-osd` exits, `rook` will exit and the pod will be restarted by K8s. - -### New Features - -`ceph-volume` enables rook to expose several new features: - -- Multiple OSDs for a single device, which is ideal for NVME devices. -- Configure OSDs on LVM, either consuming the existing LVM or automatically configuring LVM on the raw devices. -- Encrypt the OSD data with dmcrypt - -The Cluster CRD will be updated with the following settings to enable these features. All of these settings can be specified -globally if under the `storage` element as in this example. The `config` element can also be specified under individual -nodes or devices. -```yaml - storage: - config: - # whether to encrypt the contents of the OSD with dmcrypt - encryptedDevice: "true" - # how many OSDs should be configured on each device. only recommended to be greater than 1 for NVME devices - osdsPerDevice: 1 - # the class name for the OSD(s) on devices - crushDeviceClass: ssd -``` - -If more flexibility is needed that consuming raw devices, LVM or partition names can also be used for specific nodes. -Properties are shown for both bluestore and filestore OSDs. 
- -```yaml - storage: - nodes: - - name: node2 - # OSDs on LVM (open design question: need to re-evaluate the logicalDevice settings when they are implemented after 0.9 and whether they should be under the more general storage node "config" settings) - logicalDevices: - # bluestore: the DB, WAL, and Data are on separate LVs - - db: db_lv1 - wal: wal_lv1 - data: data_lv1 - dbVolumeGroup: db_vg - walVolumeGroup: wal_vg - dataVolumeGroup: data_vg - # bluestore: the DB, WAL, and Data are all on the same LV - - volume: my_lv1 - volumeGroup: my_vg - # filestore: data and journal on the same LV - - data: my_lv2 - dataVolumeGroup: my_vg - # filestore: data and journal on different LVs - - data: data_lv3 - dataVolumeGroup: data_vg - journal: journal_lv3 - journalVolumeGroup: journal_vg - # devices support both filestore and bluestore configurations based on the "config.storeType" setting at the global, node, or device level - devices: - # OSD on a raw device - - name: sdd - # OSD on a partition (partition support is new) - - name: sdf1 - # Multiple OSDs on a high performance device - - name: nvme01 - config: - osdsPerDevice: 5 -``` - -The above options for LVM and partitions look very tedious. Questions: - -- Is it useful at this level of complexity? -- Is there a simpler way users would configure LVM? -- Do users need all this flexibility? This looks like too many options to maintain. - -### Backward compatibility - -Rook will need to continue supporting clusters that are running different types of OSDs. All of the v0.8 OSDs must continue running -after Rook is upgraded to v0.9 and beyond, whether they were filestore or bluestore running on directories or devices. - -Since `ceph-volume` only supports devices that have **not** been previously configured by Rook: - -- Rook will continue to provision OSDs directly when a `directory` is specified in the CRD - - Support for creating new OSDs on directories will be deprecated. While directories might still be used for test scenarios, - it's not a mainline scenario. With the legacy design, directories were commonly used on LVM, but LVM is now directly supported. - In v0.9, support for directories will remain, but documentation will encourage users to provision devices. -- For existing devices configured by Rook, `ceph-volume` will be skipped and the OSDs will be started as previously -- New devices will be provisioned with `ceph-volume` - -### Versioning - -Rook relies on very recent developments in `ceph-volume` that are not yet available in luminous or mimic releases. -For example, rook needs to run the command: -``` -ceph-volume lvm batch --prepare -``` - -The `batch` command and the flag `--prepare` have been added recently. -While the latest `ceph-volume` changes will soon be merged to luminous and mimic, Rook needs to know if it is running an image that contains the required functionality. - -To detect if `ceph-volume` supports the required options, Rook will run the -command with all the flags that are required. To avoid side effects when testing for the version of `ceph-volume`, no devices -are passed to the `batch` command. -``` -ceph-volume lvm batch --prepare -``` - -- If the flags are supported, `ceph-volume` has an exit code of `0`. -- If the flags are not supported, `ceph-volume` has an exit code of `2`. - -Since Rook orchestrates different versions of Ceph, Rook (at least initially) will need to support running images that may not -have the features necessary from `ceph-volume`. 
When a supported version of `ceph-volume` is not detected, Rook will -execute the legacy code to provision devices. diff --git a/design/ceph/cluster-update.md b/design/ceph/cluster-update.md deleted file mode 100644 index 350f22fc1..000000000 --- a/design/ceph/cluster-update.md +++ /dev/null @@ -1,212 +0,0 @@ -# Cluster Updates - -## Background -Currently, a Rook admin can declare how they want their cluster deployed by specifying values in the [Cluster CRD](../Documentation/ceph-cluster-crd.md). -However, after a cluster has been initially declared and deployed, it is not currently possible to update the Cluster CRD and have those desired changes reflected in the actual cluster state. -This document will describe a design for how cluster updating can be implemented, along with considerations, trade-offs, and a suggested scope of work. - -## Design Overview -As previously mentioned, the interface for a user who wants to update their cluster will be the Cluster CRD. -To specify changes to a Rook cluster, the user could run a command like the following: -```console -kubectl -n rook-ceph edit cluster.ceph.rook.io rook-ceph -``` -This will bring up a text editor with the current value of the cluster CRD. -After their desired edits are made, for instance to add a new storage node, they will save and exit the editor. -Of course, it is also possible to update a cluster CRD via the Kubernetes API instead of `kubectl`. - -This will trigger an update of the CRD object, which the operator is already subscribed to events for. -The update event is provided both the new and old cluster objects, making it possible to perform a diff between desired and actual state. -Once the difference is calculated, the operator will begin to bring actual state in alignment with desired state by performing similar operations to what it does to create a cluster in the first place. -Controllers, pod templates, config maps, etc. will be updated and configured with the end result of the Rook cluster pods and state representing the users desired cluster state. - -The most common case for updating a Rook cluster will be to add and remove storage resources. -This will essentially alter the number of OSDs in the cluster which will cause data rebalancing and migration. -Therefore, updating storage resources should be performed by the operator with special consideration as to not degrade cluster performance and health beyond acceptable levels. - -## Design Details -### Cluster CRD -The Cluster CRD has many fields, but not all of them will be updatable (i.e., the operator will not attempt to make any changes to the cluster for updates to some fields). -#### Supported Fields -The following fields will be **supported** for updates: -* `mon`: Ceph mon specific settings can be changed. - * `count`: The number of monitors can be updated and the operator will ensure that as monitors are scaled up or down the cluster remains in quorum. - * `allowMultiplePerNode`: The policy to allow multiple mons to be placed on one node can be toggled. -* `deviceFilter`: The regex filter for devices allowed to be used for storage can be updated and OSDs will be added or removed to match the new filter pattern. -* `devicePathFilter`: The regex filter for paths of devices allowed to be used for storage can be updated and OSDs will be added or removed to match the new filter pattern. -* `useAllDevices`: If this value is updated to `true`, then OSDs will be added to start using all devices on nodes. 
-However, if this value is updated to `false`, the operator will only allow OSDs to be removed if there is a value set for `deviceFilter`. -This is to prevent an unintentional action by the user that would effectively remove all data in the cluster. -* `useAllNodes`: This value will be treated similarly to `useAllDevices`. -Updating it to `true` is a safe action as it will add more nodes and their storage to the cluster, but updating it to `false` is not always a safe action. -If there are no individual nodes listed under the `nodes` field, then updating this field to `false` will not be allowed. -* `resources`: The CPU and memory limits can be dynamically updated. -* `placement`: The placement of daemons across the cluster can be updated, but it is dependent on the specific daemon. -For example, monitors can dynamically update their placement as part of their ongoing health checks. -OSDs can not update their placement at all since they have data gravity that is tied to specific nodes. -Other daemons can decide when and how to update their placement, for example doing nothing for current pods and only honoring new placement settings for new pods. -* `nodes`: Specific storage nodes can be added and removed, as well as additional properties on the individual nodes that have not already been described above: - * `devices`: The list of devices to use for storage can have entries added and removed. - * `directories`: The list of directories to use for storage can also be updated. - -#### Unsupported Fields -All other properties not listed above are **not supported** for runtime updates. -Some particular unsupported fields to note: -* `dataDirHostPath`: Once the local host directory for storing cluster metadata and config has been set and populated, migrating it to a new location is not supported. -* `hostNetwork`: After the cluster has been initialized to either use host networking or pod networking, the value can not be changed. -Changing this value dynamically would very likely cause a difficult to support transition period while pods are transferring between networks and would certainly impact cluster health. - -#### Validation -It is in the user's best interests to provide early feedback if they have made an update to their Cluster CRD that is invalid or not supported. -Along with [issue 1000](https://github.com/rook/rook/issues/1000), we should use the Kubernetes CRD validation feature to verify any changes to the Cluster CRD and provide helpful error messages in the case that their update can not be fulfilled. - -#### Device Name Changes -It is important to remember that [Linux device names can change across reboots](https://wiki.archlinux.org/index.php/persistent_block_device_naming). -Because of this, we need to be very careful when determining whether it is a safe operation to remove an OSD. -We need to be absolutely sure that the user really intended to remove the OSD from a device, as opposed to the device name randomly changing and becoming out of the device filter or list. - -What is especially challenging here is that before the initial deployment of OSDs onto a node, which creates the UUIDs for each device, there is no known consistent and user friendly way to specify devices. -A lot of environments do **not** have labels, IDs, UUIDs, etc. for their devices at first boot and the only way to address them is by device name, such as `sda`. -This is unfortunate because it is a volatile identifier. 
-Some environments do have IDs at first boot and we should consider allowing users to specify devices by those IDs instead of names in the near future.
-That effort is being tracked by [issue 1228](https://github.com/rook/rook/issues/1228).
-
-The main approach that will be taken to solve this issue is to always compare the device UUID from a node's saved OSD config map against the device UUIDs of the current set of device names.
-If the two do not match, then it is not a safe operation to remove the OSD from the device.
-Let's walk through a couple of simple scenarios to illustrate this approach:
-
-**NOT SAFE: Device name has changed, but filter has not been updated by the user:**
-* User initially specifies `sda` via device filter or list. Rook configures `sda` and gets an OSD up and running.
-* The node reboots, which causes the OSD pod to restart.
-* The filter still specifies `sda`, but the device has changed its name to `sdb`. The device is now out of the filter.
-* We look at the node's saved OSD config and see that we originally set up `sda` with device UUID `wxyz-1234`.
-* The user's filter still says to use `sda`, so going by the saved config and not the current device names, we know that the old `sda` (device UUID `wxyz-1234`), which is now `sdb`, should NOT be removed.
-
-**SAFE: User has updated the filter and the device name has not changed:**
-* User initially specifies `sda` via device filter or list. Rook configures `sda` and gets an OSD up and running.
-* User updates the Cluster CRD to change the device filter or list to now be `sdb`.
-* The OSD pod restarts and when it comes back up it sees that the previously configured `sda` is no longer in the filter.
-* The pod checks the device UUID of `sda` in its saved config and compares that to the device UUID of the current `sda`.
-* The two match, so the pod knows it is a safe (user-intended) operation to remove the OSD from `sda`.
-
-### Orchestration
-When the operator receives an event that the Cluster CRD has been updated, it will need to perform some orchestration in order to bring the actual state of the cluster into agreement with the desired state.
-For example, when `mon.count` is updated, the operator will add or remove a single monitor at a time, ensuring that quorum is restored before moving on to the next monitor.
-Updates to the storage spec for the cluster require even more careful consideration and management by the operator, which will be discussed in this section.
-
-First and foremost, changes to the cluster state should not be carried out when the cluster is not in a healthy state.
-The operator should wait until cluster health is restored before any orchestration is carried out.
-
-It is important to remember that a single OSD pod can contain multiple OSD processes and that the operator itself does not have detailed knowledge of the storage resources of each node.
-More specifically, the devices that can be used for storage (e.g., those matching `deviceFilter`) are not known until the OSD pod has been started on a given node.
-
-As mentioned previously, it is recommended to make storage changes to the cluster one OSD at a time.
-Therefore, the operator and the OSD pods will need to coordinate their efforts in order to adhere to this guidance.
-When a cluster update event is received by the operator, it will work on a node-by-node basis, ensuring all storage updates are completed by the OSD pod for that node before moving to the next.
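-
-To make the device name scenarios above concrete, the following is a minimal sketch of the UUID safety check, written in Go with hypothetical types and helper names (it is not actual Rook code):
-
-```go
-package osd
-
-// isSafeToRemoveOSD is a sketch only. savedUUIDs maps a device name, as it was
-// recorded when the OSD was provisioned, to the device UUID captured in the
-// node's saved OSD config map. currentUUID is the UUID of the device that
-// currently carries that name.
-func isSafeToRemoveOSD(savedUUIDs map[string]string, deviceName, currentUUID string) bool {
-    originalUUID, ok := savedUUIDs[deviceName]
-    if !ok {
-        // No OSD was ever configured on this device name; nothing to remove.
-        return false
-    }
-    // Removal is only safe when the name still refers to the same physical
-    // device (same UUID). If the UUIDs differ, the name was likely reassigned
-    // after a reboot and the OSD must not be removed.
-    return originalUUID == currentUUID
-}
-```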
- -When an OSD pod starts up and has completed its device discovery, it will need to perform a diff of the desired storage against the actual storage that is currently included in the cluster. -This diff will determine the set of OSD instances that need to be removed or added within the pod. -Fortunately, the OSD pod start up is already idempotent and already handles new storage additions, so the remaining work will be the following: -* Safely removing existing OSDs from the cluster -* Waiting for data migration to complete and all placement groups to become clean -* Signaling to the operator that the pod has completed its storage updates - -We should consider an implementation that allows the OSD pod to refresh it's set of OSDs without restarting the entire pod, but since the replication controller's pod template spec needs to be updated by the operator in order to convey this information to the pod, we may need to live with restarting the pod either way. -Remember that this will be done one node at a time to mitigate impact to cluster health. - -Also, other types of update operations to the cluster (e.g., software upgrade) should be blocked while a cluster update is ongoing. - -#### Cluster CRD Status -The Cluster CRD status will be kept up to date by the operator so the user has some insight into the process being carried out. -While the operator is carrying out an update to the cluster, the Cluster CRD `status` will be set to `updating`. -If there are any errors during the process, the `message` field will be updated with a specific reason for the failure. -We should also update documentation for our users with easy commands to query the status and message fields so they can get more information easily. - -#### Operator and OSD pod communication -As mentioned previously, the OSD pods need to communicate to the operator when they are done orchestrating their local OSD instance changes. -To make this effort more resilient and tolerant of operator restarts, this effort should be able to be resumed. -For example, if the operator restarts while an OSD pod is draining OSDs, the operator should **not** start telling other OSD pods to do work. - -The OSDs and operator will jointly maintain a config map to track the status of storage update operations within the cluster. -When the operator initially requests an OSD pod to compute its storage diff, it will update a config map with an entry for the OSD containing a status of `computingDiff` and a current timestamp. -When the OSD pod has finished computation and started orchestrating changes, it will update the entry with a status of `orchestrating` and a current timestamp. -Finally, when the pod has finished, it will update the entry with `completed` and a current timestamp again, letting the operator know it is safe to move onto the next node. - -If the operator is restarted during this flow, it will look in the config map for any OSD pod that is not in the `completed` state. -If it finds any, then it will wait until they are completed before moving onto another node. -This approach will ensure that only 1 OSD pod is performing changes at a time. -Note that this approach can also be used to ask an OSD pod to compute changes without having to restart the pod needlessly. -If the OSD pods are watching the config map for changes, then they can compute a diff upon request of the operator. - -### Storage Update Process -This section covers the general sequence for updating storage resources and outlines important considerations for cluster health. 
-Before any changes begin, we will temporarily disable scrubbing of placement groups (the process of verifying data integrity of stored objects) to maximize cluster resources that can go to both client I/O and recovery I/O for data migration: -```console -ceph osd set noscrub -ceph osd set nodeep-scrub -``` - -Some [Ceph documentation](https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/administration_guide/adding_and_removing_osd_nodes#recommendations) also recommends limiting backfill and recovery work while storage is being added or removed. -The intent is to maximize client I/O while sacrificing throughput of data migration. -I do not believe this is strictly necessary and at this point I would prefer to not limit recovery work in the hopes of finishing data migrations as quickly as possible. -I suspect that most cluster administrators would not be removing storage when the cluster is under heavy load in the first place. -This trade-off can be revisited if we see unacceptable performance impact. - -#### Adding Storage -As mentioned previously, we will add one OSD at a time in order to allow the cluster to rebalance itself in a controlled manner and to avoid getting into a situation where there is an unacceptable amount of churn and thrashing. -Adding a new OSD is fairly simple since the OSD pod logic already supports it: -* If the entire node is being added, ensure the node is added to the CRUSH map: `ceph osd crush add-bucket {bucket-name} {type}` -* For each OSD: - * Register, format, add OSD to the crush map and start the OSD process like normal - * Wait for all placement groups to reach `active+clean` state, meaning data migration is complete. - -#### Removing Storage -Removing storage is a more involved process and it will also be done one OSD at a time to ensure the cluster returns to a clean state. -Of special note for removing storage is that a check should be performed to ensure that the cluster has enough remaining storage to recover (backfill) the entire set of objects from the OSD that is being removed. -If the cluster does not have enough space for this (e.g., it would hit the `full` ratio), then the removal should not proceed. - -For each OSD to remove, the following steps should be performed: -* reweight the OSD to 0.0 with `ceph osd crush reweight osd. 0.0`, which will trigger data migration from the OSD. -* wait for all data to finish migrating from the OSD, meaning all placement groups return to the `active+clean` state -* mark the OSD as `out` with `ceph osd out osd.` -* stop the OSD process and remove it from monitoring -* remove the OSD from the CRUSH map: `ceph osd crush remove osd.` -* delete the OSD's auth info: `ceph auth del osd.` -* delete the OSD from the cluster: `ceph osd rm osd.` -* delete the OSD directory from local storage (if using `dataDirHostPath`): `rm -fr /var/lib/rook/` - -If the entire node is being removed, ensure that the host node is also removed from the CRUSH map: -```console -$ ceph osd crush rm -``` - -#### Completion -After all storage updates are completed, both additions and removals, then we can once again enable scrubbing: -```console -ceph osd unset noscrub -ceph osd unset nodeep-scrub -``` - -### Placement Groups -The number of placement groups in the cluster compared to the number of OSDs is a difficult trade-off without knowing the user's intent for future cluster growth. -The general rule of thumb is that you want around 100 PGs per OSD. 
-With less than that, you have potentially unbalanced distribution of data with certain OSDs storing more than others. -With more PGs than that, you have increased overhead in the cluster because more OSDs need to coordinate with each other, impacting performance and reliability. - -It's important to note that **shrinking** placement group count (merging) is still **not supported** in Ceph. -Therefore, you can only increase the number of placement groups (splitting) over time. - -If the cluster grows such that we have too few placement groups per OSD, then we can consider increasing the number of PGs in the cluster by incrementing the `pg_num` and `pgp_num` for each storage pool. -Similar to adding new OSDs, this increase of PGs should be done incrementally and in a coordinated fashion to avoid degrading performance significantly in the cluster. - -Placement group management will be tracked in further detail in [issue 560](https://github.com/rook/rook/issues/560). - -## Scope -The implementation of the design described in this document could be done in a phased approach in order to get critical features out sooner. -One proposal for implementation phases would be: -1. **Simple add storage**: Storage resources can be added to the Cluster CRD and extremely minimal orchestration would be performed to coordinate the storage changes. -Cluster performance impact would not be ideal but may be tolerable for many scenarios, and Rook clusters would then have dynamic storage capabilities. -1. **Simple remove storage**: Similar to the simple adding of storage, storage resources can be removed from the Cluster CRD with minimal orchestration. -1. **Dynamic storage orchestration**: The more careful orchestration of storage changes would be implemented, with the operator and OSD pods coordinating across the cluster to slowly ramp up/down storage changes with minimal impact to cluster performance. -1. **Non-storage cluster field updates**: All other properties in the cluster CRD supported for updates will be implemented (e.g., `mon`, `resources`, etc.). -1. **Placement Group updates**: Placement group counts will be updated over time as the cluster grows in order to optimize cluster performance. 
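-
-To make the placement group adjustment mentioned above concrete, the increase would be applied to each pool incrementally with standard Ceph commands such as the following (the pool name and target counts are examples only):
-
-```console
-ceph osd pool get replicapool pg_num
-ceph osd pool set replicapool pg_num 64
-ceph osd pool set replicapool pgp_num 64
-```
-
-As with adding OSDs, each increase should be followed by waiting for all placement groups to return to the `active+clean` state before continuing.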
diff --git a/design/ceph/data-model.md b/design/ceph/data-model.md deleted file mode 100644 index 94452277d..000000000 --- a/design/ceph/data-model.md +++ /dev/null @@ -1,47 +0,0 @@ -# Rook Data Model - -``` -# Operator -The operator manages multiple Rook storage clusters -The operator manages all CRDs for the Rook clusters -One instance of the operator is active -Multiple instances of the operator can be on standby in an HA configuration - -# Storage Cluster -The cluster CRD defines desired settings for a storage cluster -All resources for a Rook cluster are created in the same Kubernetes namespace -A cluster has an odd number of mons that form quorum -A cluster has an osd per storage device -A cluster has zero or more pools -A cluster has zero or more block devices -A cluster has zero or more object stores -A cluster has zero or more shared file services - -# Pool -The pool CRD defines desired settings for a pool -A pool is created with either replication or erasure coding -Replication can be 1 or more -Erasure coding requires k >= 2 and m >= 1, where k is data chunks and m is coding chunks -Erasure coding specifies a plugin (default=jerasure) -Erasure coding specifies an encoding algorithm (default=reed_sol_van) -A pool can set its failure domain using a CRUSH rule (default=host) - -# Object Store -The object store CRD defines desired settings for an object store -An object store has a set of pools dedicated to its instance -Object store metadata pools can specify the same set of pool settings -The object store data pool can specify all pool settings -An object store has a unique set of authorized users -An object store has one or more stateless RGW pods for load balancing -An object store can specify an SSL certificate for secure connections -An object store can specify a port for RGW services (default=53390) -An object store represents a Ceph zone -An object store can be configured for replication from an object store in the same cluster or another cluster - -# Shared File System -The file system CRD defines desired settings for a file system -A file system has one MDS service if not partitioned -A file system has multiple MDS services if partitioned -A file system has one metadata pool -A file system has one data pool -``` diff --git a/design/ceph/decouple-ceph-version.md b/design/ceph/decouple-ceph-version.md deleted file mode 100644 index 38cdc9a32..000000000 --- a/design/ceph/decouple-ceph-version.md +++ /dev/null @@ -1,163 +0,0 @@ -# Decoupling the Ceph version - -**Targeted for v0.9** - -Today the version of Ceph is tied to the version of Rook. Each release of Rook releases a specific version of Ceph that is embedded in the same docker image. -This needs to be changed such that the version of Ceph is decoupled from the release of Rook. By separating the decision of which version of Ceph will be deployed with Rook, we have a number of advantages: -- Admins can choose to run the version of Ceph that meets their requirements. -- Admins can control when they upgrade the version of Ceph. The data path upgrade needs to be carefully controlled by admins in production environments. -- Developers can test against any version of Ceph, whether a stable version of Luminous or Mimic, or even a private dev build. - -Today Rook still includes Luminous, even while Mimic was released several months ago. A frequently asked question from users is when we are going to update to Mimic so they can take advantage of the new features such as the improved dashboard. 
That question will not be heard anymore after this design change. As soon as a new build of Ceph is available, Rook users will be able to try it out. - -## Coupled (Legacy) Design - -The approach of embedding Ceph into the Rook image had several advantages that contributed to the design. -- Simpler development and test matrix. A consistent version of Ceph is managed and there are no unstable Ceph bits running in the cluster. -- Simpler upgrade path. There is only one version to worry about upgrading. - -The project is growing out of these requirements and we need to support some added complexity in order to get the benefits of the decoupled versions. - -## New Design - -There are two versions that will be specified independently in the cluster: the Rook version and the Ceph version. - -### Rook Version - -The Rook version is defined by the operator's container `image` tag. All Rook containers launched by the operator will also launch the same version of the Rook image. -The full image name is an important part of the version. This allows the container to be loaded from a private repo if desired. - -In this example, the Rook version is `rook/ceph:v0.8.1`. - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-ceph-operator -spec: - template: - spec: - containers: - - name: rook-ceph-operator - image: rook/ceph:v0.8.1 -``` - -### Ceph Version - -The Ceph version is defined under the property `cephVersion` in the Cluster CRD. All Ceph daemon containers launched by the Rook operator will use this image, including the mon, mgr, -osd, rgw, and mds pods. The significance of this approach is that the Rook binary is not included in the daemon containers. All initialization performed by Rook to generate the Ceph config and prepare the daemons must be completed in an [init container](https://github.com/rook/rook/issues/2003). Once the Rook init containers complete their execution, the daemon container will run the Ceph image. The daemon container will no longer have Rook running. - -In the following Cluster CRD example, the Ceph version is Mimic `13.2.2` built on 23 Oct 2018. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: Cluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - cephVersion: - image: ceph/ceph:v13.2.2-20181023 -``` - -### Operator Requirements - -The operator needs to run the Ceph client tools to manage the cluster. For example, the `ceph` tool is needed for general Ceph configuration and status, while `radosgw-admin` is required for managing an object store. Therefore, all the necessary client tools will still be included in the Rook image. - -The client tools are tested by the Ceph team to be backward and forward compatible by two versions. This means the operator can support a version of Ceph up to two versions older than the client tools it contains. -With each Rook release, the tools will be included from the latest release of Ceph. For example, in 0.9 Rook will likely include the Mimic tools. Upgrades would be supported from Luminous to Mimic. -Rook 0.9 can also be tested to support upgrades to Nautilus since they may be released in the same time frame. Since the Ceph tools are forward compatible, the Mimic tools will be sufficient to support upgrading to Nautilus. -If Nautilus is released after Rook 0.9, a patch release can be made to 0.9 so that Rook can officially support the upgrade at that point. The changes in the patch release should be minimal since upgrading to Nautilus could have been mostly planned for in 0.9. 
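-
-As a hedged illustration, an admin could compare the client tools bundled with the operator against the daemon versions running in the cluster with commands such as the following (pod names and namespaces depend on the installation):
-
-```console
-# version of the Ceph client tools in the Rook operator image
-kubectl -n rook-ceph-system exec <rook-ceph-operator-pod> -- ceph --version
-# versions of the Ceph daemons currently running in the cluster (Luminous and newer)
-kubectl -n rook-ceph exec <rook-ceph-tools-pod> -- ceph versions
-```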
- -The operator will be made to understand differences in the Ceph versions that are necessary for orchestration. Some examples might include: -- If running Luminous, start the Ceph dashboard on http. If running Mimic, a self-signed cert could be generated to start the dashboard with https. -- If a new daemon is added in a future Ceph release, the operator would understand to deploy that daemon only if the Ceph version is at least that version. - -### Supported Versions - -Rook will support a very specific list of major versions. Outside these versions, Rook will not be aware of the needs for configuring and upgrading the cluster. -In v0.9, the supported versions will be: -- luminous (ceph/ceph:v12.2.x) -- mimic (ceph/ceph:v13.2.x) - -Depending on the timing of the 0.9 and Nautilus releases, Nautilus will likely be supported either in 0.9 or a patch release. Versions not yet officially supported -can be tested with settings in the CRD to be mentioned below. - -All Rook implementation specific to a Ceph version will apply to all patch releases of that major release. For example, Rook is not expected to have any differences handling -various Mimic patch releases. - -### Upgrades - -The flexibility during upgrades will now be improved since the upgrade of Rook will be independent from the upgrade to the Ceph version. -- To upgrade Rook, update the version of the Rook operator container -- To upgrade Ceph, make sure Rook is running the latest release, then update the `cephVersion.image` in the cluster CRD - -The versions to be supported during upgrade will be a specific set for each version of Rook. In 0.9, it is anticipated that the only upgrade of Ceph -supported would only be Luminous to Mimic. When Rook officially adds support for a release of Ceph (ie. Nautilus), the upgrade path will also be supported from one previous version. -For example, after Nautilus support is added, Luminous users would first need to upgrade to Mimic and then Nautilus. While it may be possible to skip versions -during upgrade, it is not supported in order to keep the testing more scoped. - -#### Upgrade Sequence - -Each time the operator starts, an idempotent orchestration is executed to ensure the cluster is in the desired state. As part of the orchestration, the version of the operator -will be reviewed. If the version has changed, the operator will update each of the daemons in a predictable order such as: mon, mgr, osd, rgw, mds. If the Rook upgrade requires any special steps, they will be handled as each version upgrade requires. - -When the cluster CRD is updated with a new Ceph version, the same idempotent orchestration is executed to evaluate desired state that needs to be applied to the cluster. -Over time as the operator becomes smarter and more versions are supported, the custom upgrade steps will be implemented as needed. - -Daemons will only be restarted when necessary for the upgrade. The Rook upgrade sometimes will not require a restart of the daemons, -depending on if the pod spec changed. The Ceph upgrade will always require a restart of the daemons. In either case, a restart will be done in an orderly, rolling manner -with one pod at a time along with health checks as the upgrade proceeds. The upgrade will be paused if the cluster becomes unhealthy. - -See the [Upgrade design doc](ceph-upgrade.md) for more details on the general upgrade approach. - -#### Admin control of upgrades - -To allow more control over the upgrade, we define `upgradePolicy` settings. 
They will allow the admin to: -- Upgrade one type of daemon at a time and confirm they are healthy before continuing with the upgrade -- Allow for testing of future versions that are not officially supported - -The settings in the CRD to accommodate the design include: -- `upgradePolicy.cephVersion`: The version of the image to start applying to the daemons specified in the `components` list. - - `allowUnsupported`: If `false`, the operator would refuse to upgrade the Ceph version if it doesn't support or recognize that version. This would allow testing of upgrade to unreleased versions. The default is `false`. -- `upgradePolicy.components`: A list of daemons or other components that should be upgraded to the version `newCephVersion`. The daemons include `mon`, `osd`, `mgr`, `rgw`, and `mds`. The ordering of the list will be ignored as Rook will only support ordering as it determines necessary for a version. If there are special upgrade actions in the future, they could be named and added to this list. - -For example, with the settings below the operator would only upgrade the mons to mimic, while other daemons would remain on luminous. When the admin is ready, he would add more daemons to the list. - -```yaml -spec: - cephVersion: - image: ceph/ceph:v12.2.9-20181026 - allowUnsupported: false - upgradePolicy: - cephVersion: - image: ceph/ceph:v13.2.2-20181023 - allowUnsupported: false - components: - - mon -``` - -When the admin is completed with the upgrade or he is ready to allow Rook to complete the full upgrade for all daemons, he would set `cephVersion.image: ceph/ceph:v13.2.2`, and the operator would ignore the `upgradePolicy` since the `cephVersion` and `upgradePolicy.cephVersion` match. - -If the admin wants to pause or otherwise control the upgrade closely, there are a couple of natural back doors: -- Deleting the operator pod will effectively pause the upgrade. Starting the operator pod up again would resume the upgrade. -- If the admin wants to manually upgrade the daemons, he could stop the operator pod, then set the container image on each of the Deployments (pods) he wants to update. The difficulty with this approach is if there are any changes to the pod specs that are made between versions of the daemons. The admin could update the pod specs manually, but it would be error prone. - -#### Developer controls - -If a developer wants to test the upgrade from mimic to nautilus, he would first create the cluster based on mimic. Then he would update the crd with the "unrecognized version" attribute in the CRD to specify nautilus such as: -```yaml -spec: - cephVersion: - image: ceph/ceph:v14.1.1 - allowUnsupported: true -``` - -Until Nautilus builds are released, the latest Nautilus build can be tested by using the image `ceph/daemon-base:latest-master`. - -### Default Version - -For backward compatibility, if the `cephVersion` property is not set, the operator will need to internally set a default version of Ceph. -The operator will assume the desired Ceph version is Luminous 12.2.7, which was shipped with Rook v0.8. -This default will allow the Rook upgrade from v0.8 to v0.9 to only impact the Rook version and hold the Ceph version at Luminous. -After the Rook upgrade to v0.9, the user can choose to set the `cephVersion` property to some newer version of Ceph such as mimic. 
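-
-To verify which Ceph image each daemon is actually running after changing `cephVersion`, a hedged example could be used (the namespace is illustrative and depends on the installation):
-
-```console
-kubectl -n rook-ceph get deployments -o jsonpath='{range .items[*]}{.metadata.name}{" -> "}{.spec.template.spec.containers[0].image}{"\n"}{end}'
-```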
diff --git a/design/ceph/dedicated-osd-pod.md b/design/ceph/dedicated-osd-pod.md
deleted file mode 100644
index c1d8695ed..000000000
--- a/design/ceph/dedicated-osd-pod.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# Run OSD in its own Pod
-
-## TL;DR
-
-A one-OSD-per-Pod placement should be implemented to improve reliability and resource efficiency for Ceph OSD daemons.
-
-## Background
-
-Currently in Rook 0.7, the Rook Operator starts a ReplicaSet to run the [`rook osd`](https://github.com/rook/rook/blob/master/cmd/rook/osd.go) command (hereafter referred to as `OSD Provisioner`) on each storage node. The ReplicaSet has just one replica. `OSD Provisioner` scans and prepares devices, creates OSD IDs and data directories or devices, and generates the Ceph configuration. Finally, `OSD Provisioner` starts all Ceph OSD, i.e. `ceph-osd`, daemons in the foreground and tracks the `ceph-osd` processes.
-
-As observed, all Ceph OSDs are running in the same Pod.
-
-## Limitations
-
-The limitations of the current design are:
-
-- Reliability issue. One Pod for all OSDs offers neither the highest reliability nor the best efficiency. If the Pod is deleted, accidentally or during maintenance, all OSDs are down until the ReplicaSet restarts.
-- Efficiency issue. Resource limits cannot be set effectively on the OSDs since the number of OSDs in the pod can vary from node to node. The operator cannot make decisions about the topology because it doesn't know in advance what devices are available on the nodes.
-- Tight Ceph coupling. The monolithic device discovery and provisioning code cannot be reused for other backends.
-- Process management issue. Rook's process management is very simple. Using Kubernetes pod management is much more reliable.
-
-
-A more comprehensive discussion can be found in [this issue](https://github.com/rook/rook/issues/1341).
-
-## Terms
-
-- Device Discovery. A DaemonSet that discovers unformatted devices on the host. The DaemonSet populates a per node Raw Device Configmap with device information. The DaemonSet runs on nodes that are labelled as storage nodes. The DaemonSet can start independently of the Rook Operator. Device Discovery is storage backend agnostic.
-
-- Device Provisioner. A Pod that is given device or directory paths upon start and prepares backend-specific storage from them. For instance, the provisioner prepares OSDs for the Ceph backend. It is a Kubernetes batch job and exits after the devices are prepared.
-
-## Proposal
-
-We propose the following change to address the limitations.
-
-
-### Create new OSDs
-| Sequence | Rook Operator | Device Discovery | Device Provisioner | Ceph OSD Deployment |
-|---|---|---|---|---|
-| 0 | | Start on labeled storage nodes, discover unformatted devices and store device information in a per node Raw Device Configmap | | |
-| 1 | Read devices and node filters from cluster CRD | | | |
-| 2 | Parse the Raw Device Configmap, extract nodes and device paths, filter them based on the cluster CRD, and create a Device Provisioner deployment for each device | | | |
-| 3 | Watch the device provisioning Configmap | | Prepare OSDs; persist OSD ID, data path, and node info in a per node device provisioning Configmap | |
-| 4 | Detect the device provisioning Configmap change, parse the Configmap, extract OSD info, and construct the OSD Pod command and args | | | |
-| 5 | Create one deployment per OSD | | | Start the `ceph-osd` daemon, one Pod per device |
-
-
-This change addresses the above limitations in the following ways:
-- High reliability.
Each `ceph-osd` daemon runs in its own Pod, and its restarts and upgrades are managed by Kubernetes controllers. Upgrading the Device Provisioner Pod no longer restarts `ceph-osd` daemons.
-- More efficient resource requests. Once Device Discovery detects all devices, the Rook Operator is informed of the topology and assigns appropriate resources to each Ceph OSD deployment.
-- Reusable. Device discovery can be used for other storage backends.
-
-
-### Detailed Device Discovery Process
-
-Each `Device Discovery` DaemonSet walks through device trees to find unformatted block devices and stores the device information in a per node `Raw Device Configmap`.
-
-A sample `Raw Device Configmap` from node `node1` is as follows:
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  namespace: rook-system
-  name: node1-raw-devices
-data:
-  devices:
-  - device-path: /dev/disk/by-id/scsi-dead   # persistent device path
-    size: 214748364800                       # size in bytes
-    rotational: 0                            # 0 for ssd/nvme, 1 for hdd, based on reading from sysfs
-    extra: '{ "vendor": "X", "model": "Y" }' # extra information under sysfs about the device in json, such as vendor/model, scsi level, target info, etc.
-  - device-path: /dev/disk/by-id/scsi-beef   # persistent device path
-    size: 214748364800                       # size in bytes
-    rotational: 1                            # 0 for ssd/nvme, 1 for hdd, based on reading from sysfs
-```
-
-### Discussions
-
-It is expected that Device Discovery will be merged into the Rook Operator once local PVs are supported in the Rook Cluster CRD. The Rook Operator can infer the device topology from local PV Configmaps. However, as long as raw devices or directories are still in use, a dedicated Device Discovery Pod is still needed.
-
-If the storage nodes are also compute nodes, it is possible that devices dynamically attached to those nodes and left unformatted are discovered by the Device Discovery DaemonSet. To avoid this race condition, the admin can choose to use separate device tree directories: one for devices used for Rook and the other for compute. Or the Cluster CRD should explicitly identify which devices should be used for Rook.
-
-Alternatively, since `rook agent` is currently running as a DaemonSet on all nodes, it is conceivable to have `rook agent` poll devices and update the device orchestration Configmap. This approach, however, needs to give `rook agent` the privilege to modify Configmaps. Moreover, the `Device Discovery` Pod doesn't need privileged mode, host network, or write access to the hosts' `/dev` directory, all of which are required by `rook agent`.
-
-## Impact
-
-- Security. The Device Provisioner Pod needs privileges to access Configmaps, but Ceph OSD Pods don't need to access Kubernetes resources and thus don't need any RBAC rules.
-
-- Rook Operator. The Rook Operator watches two kinds of Configmaps: the raw device Configmaps that are created by the Device Discovery Pod and the storage-specific device provisioning Configmaps that are created by the Device Provisioner Pod. For raw device Configmaps, the Operator creates a storage-specific device provisioner deployment to prepare these devices. For device provisioning Configmaps, the Operator creates storage-specific daemon deployments (e.g. Ceph OSD Daemon deployments) with the device information in the Configmaps and the resource information in the Cluster CRD.
-
-- Device Discovery. It is a new long-running process in a DaemonSet that runs on each node that has matching labels. It discovers storage devices on the nodes and populates the raw device Configmaps.
-
-- Device Provisioner. The Device Provisioner becomes a batch job; it no longer execs the Ceph OSD daemon.
-
-- Ceph OSD Daemon. `ceph-osd` is no longer exec'ed by the Device Provisioner; it becomes the Pod entrypoint.
-
-- Ceph OSD Pod naming. The Rook Operator creates Ceph OSD Pod metadata using the cluster name, node name, and OSD ID.
diff --git a/design/ceph/dynamic-provision-filesystem.md b/design/ceph/dynamic-provision-filesystem.md
deleted file mode 100644
index 1d48bf46d..000000000
--- a/design/ceph/dynamic-provision-filesystem.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# File System Dynamic Provisioning
-
-## Overview
-
-Currently in Rook, to consume a CephFS filesystem, the user has to specify the CephFS volume plugin as well as the required inputs. Some of these inputs are very cumbersome and require hacky commands to obtain. We should use the dynamic provisioning feature with PVCs and PVs to facilitate the consumption of CephFS. There are many benefits to this.
-Using PVCs allows us to adhere closely to the Kubernetes API. That means we get all the features that Kubernetes provides, such as setting a reclaim policy on the volume, using RBAC on provisioning, and defining the access mode.
-On consumption, the pod only has to reference the PVC. That means the pod manifest doesn't have to change whether the PVC is backed by block or filesystem storage.
-
-Another benefit is that it allows us to consume StorageClasses that the admin defines and creates. Users don't have to worry about metadataPool, erasureCoded, affinity, toleration, etc. All they care about is creating a filesystem PVC and referencing a StorageClass that matches their filesystem needs.
-
-This feature has already been requested by a few users in our community; an issue has been created at https://github.com/rook/rook/issues/1125.
-Dynamic filesystem provisioning has also already been done in https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/cephfs, so Rook can adopt a similar approach.
-
-## Current Experience
-
-To consume a filesystem, the experience is as follows:
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: mysql
-spec:
-  strategy:
-    type: Recreate
-  template:
-    spec:
-      containers:
-      - image: mysql:5.6
-        name: mysql
-        env:
-        - name: MYSQL_ROOT_PASSWORD
-          value: changeme
-        ports:
-        - containerPort: 3306
-          name: mysql
-        volumeMounts:
-        - name: mysql-persistent-storage
-          mountPath: /var/lib/mysql
-      volumes:
-      - name: mysql-persistent-storage
-        cephfs:
-          monitors:
-          - monitor1
-          - monitor2
-          - monitor3
-          user: admin
-          secretRef:
-            name: rook-admin
-```
-
-Users will have to come up with these values and ensure every parameter is provided correctly.
-
-## Experience with Dynamic Provisioned Filesystem
-
-To create a filesystem, you just create a PVC object. This is consistent with all other storage provisioning in Kubernetes.
- -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: myfsdata -spec: - storageClassName: rook-filesystem-simple - path: /myData # Will use root path, "/", if not provided - accessModes: - - ReadWriteMany -``` - -To consume it, the pod manifest is shown as follows: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: mysql -spec: - strategy: - type: Recreate - template: - spec: - containers: - - image: mysql:5.6 - name: mysql - env: - - name: MYSQL_ROOT_PASSWORD - value: changeme - ports: - - containerPort: 3306 - name: mysql - volumeMounts: - - name: mysql-persistent-storage - mountPath: /var/lib/mysql - volumes: - - name: mysql-persistent-storage - persistentVolumeClaim: - claimName: myfsdata -``` - -Note that the consuming pod manifest looks the same whether it is mounting a filesystem or a block device. - -## StorageClass Example - -Notice that there was a reference to a StorageClass called `rook-filesystem-simple` in the filesystem PVC example I previously showed. Dynamic provisioned storage refers to a StorageClass object for details and configuration about how the storage should be provisioned. -The storage class is setup by the administrator and can look as follows for filesystem: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-filesystem-simple -provisioner: rook.io/filesystem -parameters: - fsName: myFS # Name of the filesystem to use. -``` - -The referenced filesystem, `myFS`, would have to be also created by the admin using a [Filesystem CRD](/Documentation/ceph-filesystem-crd.md). - -The admin could also have created a more detailed StorageClass for more a durable filesystem as follows. Lets call it `rook-filesystem-gold`: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-filesystem-gold -provisioner: rook.io/filesystem -parameters: - fsName: mySuperHAFS -``` - -With multiple storage class objects, users can refer to many filesystems that match their needs. - -## Implementation - -In order to do this, we will need to leverage the external-provisioner controller to watch for PVC objects. The external-provisioner controller is already being used by Rook for provisioning block devices. - -The implementation logic will look similar to the logic done for block devices. The provisioner will watch for PVC objects of types `rook.io/filesystem`. When the PVC is created, the provisioner will parse for the filesystem information from the StorageClass and create a volume source with all required information. Similarly, when the PVC is deleted, the underlying filesystem components (mds, data pools, etc) will also be deleted. diff --git a/design/ceph/external-management.md b/design/ceph/external-management.md deleted file mode 100644 index d2f98c0a8..000000000 --- a/design/ceph/external-management.md +++ /dev/null @@ -1,95 +0,0 @@ -# Enable external Ceph management tools - -Target version: 0.9 - -## TL;DR - -Some tools want to use Rook to run containers, but not manage the logical -Ceph resources like Pools. We should make Rook's pool management optional. - -## Background - -Currently in Rook 0.8, creating and destroying a Filesystem (or ObjectStore) -in a Ceph cluster also creates and destroys the associated Ceph filesystem -and pools. - -The current design works well when the Ceph configuration is within the -scope of what Rook can configure itself, and the user does not modify -the Ceph configuration of pools out of band. 
- -## Limitations - -The current model is problematic in some cases: - -- A user wants to use Ceph functionality outside of Rook's subset, and - therefore create their pools by hand before asking Rook to run - the daemon containers for a filesystem. -- A user externally modifies the configuration of a pool (such as the - number of replicas), they probably want that new configuration, rather than - for Rook to change it back to match the Rook Filesystem settings. -- A risk-averse user wants to ensure that mistaken edits to their Rook config cannot - permanently erase Ceph pools (i.e. they want to only delete pools through - an imperative interface with confirmation prompts etc). - -## Proposal - -In FilesystemSpec (and ObjectStoreSpec), when the metadata and -data pool fields are left empty, Rook will not do any management of logical -Ceph resources (Ceph pools and Ceph filesystems) for the filesystem. - -The pools may be initially non-nil, and later modified -to be nil. In this case, while Rook may have created the logical -resources for the filesystem, it will not remove them when the Rook filesystem -is removed. - -If either of the metadata/data fields are non-nil, then they both must -be non-nil: Rook will not partially manage the pools for a given filesystem -or object store. - -### Before (pools always specified) - -```yaml -apiVersion: ceph.rook.io/v1 -kind: Filesystem -metadata: - name: myfs - namespace: rook-ceph -spec: - metadataPool: - replicated: - size: 3 - dataPools: - - erasureCoded: - dataChunks: 2 - codingChunks: 1 - metadataServer: - activeCount: 1 - activeStandby: true -``` - -### After (pools may be omitted) - -In this example, the pools are omitted. Rook will not create -any pools or a Ceph filesystem. A filesystem named ``myfs`` should already -exist in Ceph, otherwise Rook will not start any MDS pods. - -```yaml -apiVersion: ceph.rook.io/v1 -kind: Filesystem -metadata: - name: myfs - namespace: rook-ceph -spec: - metadataServer: - activeCount: 1 - activeStandby: true -``` - - -## Impact - -- Rook Operator: add logic to skip logical resource -management when pools are omitted in FilesystemSpec or ObjectStoreSpec - -- Migration: none required. Existing filesystems and objectstores always -have pools set explicitly, so will continue to have these managed by Rook. diff --git a/design/ceph/filesystem.md b/design/ceph/filesystem.md deleted file mode 100644 index 10f4cea6a..000000000 --- a/design/ceph/filesystem.md +++ /dev/null @@ -1,91 +0,0 @@ -# Rook Shared File System - -## Overview - -A shared file system is a collection of resources and services that work together to serve a files for multiple users across multiple clients. Rook will automate the configuration of the Ceph resources and services that are necessary to start and maintain a highly available, durable, and performant shared file system. - -### Prerequisites - -A Rook storage cluster must be configured and running in Kubernetes. In this example, it is assumed the cluster is in the `rook` namespace. - -## File System Walkthrough - -When the storage admin is ready to create a shared file system, he will specify his desired configuration settings in a yaml file such as the following `filesystem.yaml`. This example is a simple configuration with metadata that is replicated across different hosts, and the data is erasure coded across multiple devices in the cluster. One active MDS instance is started, with one more MDS instance started in standby mode. 
-```yaml -apiVersion: ceph.rook.io/v1 -kind: Filesystem -metadata: - name: myfs - namespace: rook-ceph -spec: - metadataPool: - replicated: - size: 3 - dataPools: - - erasureCoded: - dataChunks: 2 - codingChunks: 1 - metadataServer: - activeCount: 1 - activeStandby: true -``` - -Now create the file system. -```bash -kubectl create -f filesystem.yaml -``` - -At this point the Rook operator recognizes that a new file system needs to be configured. The operator will create all of the necessary resources. -1. The metadata pool is created (`myfs-meta`) -1. The data pools are created (only one data pool for the example above: `myfs-data0`) -1. The Ceph file system is created with the name `myfs` -1. If multiple data pools were created, they would be added to the file system -1. The file system is configured for the desired active count of MDS (`max_mds`=3) -1. A Kubernetes deployment is created to start the MDS pods with the settings for the file system. Twice the number of instances are started as requested for the active count, with half of them in standby. - -After the MDS pods start, the file system is ready to be mounted. - - -## File System CRD - -The file system settings are exposed to Rook as a Custom Resource Definition (CRD). The CRD is the Kubernetes-native means by which the Rook operator can watch for new resources. The operator stays in a control loop to watch for a new file system, changes to an existing file system, or requests to delete a file system. - -### Pools - -The pools are the backing data store for the file system and are created with specific names to be private to a file system. Pools can be configured with all of the settings that can be specified in the [Pool CRD](/Documentation/ceph-pool-crd.md). The underlying schema for pools defined by a pool CRD is the same as the schema under the `metadataPool` element and the `dataPools` elements of the file system CRD. - -```yaml - metadataPool: - replicated: - size: 3 - dataPools: - - replicated: - size: 3 - - erasureCoded: - dataChunks: 2 - codingChunks: 1 -``` - -Multiple data pools can be configured for the file system. Assigning users or files to a pool is left as an exercise for the reader with the [CephFS documentation](http://docs.ceph.com/docs/master/cephfs/file-layouts/). - -### Metadata Server - -The metadata server settings correspond to the MDS service. -- `activeCount`: The number of active MDS instances. As load increases, CephFS will automatically partition the file system across the MDS instances. Rook will create double the number of MDS instances as requested by the active count. The extra instances will be in standby mode for failover. -- `activeStandby`: If true, the extra MDS instances will be in active standby mode and will keep a warm cache of the file system metadata for faster failover. The instances will be assigned by CephFS in failover pairs. If false, the extra MDS instances will all be on passive standby mode and will not maintain a warm cache of the metadata. -- `placement`: The mds pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, `podAntiAffinity`, and `topologySpreadConstraints` similar to placement defined for daemons configured by the [cluster CRD](/cluster/examples/kubernetes/ceph/cluster.yaml). - -```yaml - metadataServer: - activeCount: 1 - activeStandby: true - placement: -``` - -### Multiple File Systems -In Ceph Luminous, multiple file systems is still considered an experimental feature. 
While Rook seamlessly enables this scenario, be aware of the issues in the [CephFS docs](http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster) with snapshots and security implications. - - -### CephFS data model - -For a description of the underlying Ceph data model, see the [CephFS Terminology](http://docs.ceph.com/docs/master/cephfs/standby/#terminology). diff --git a/design/ceph/local-node-agent.md b/design/ceph/local-node-agent.md deleted file mode 100644 index ec22fa64b..000000000 --- a/design/ceph/local-node-agent.md +++ /dev/null @@ -1,211 +0,0 @@ -# **Rook Local Node Agent** - -## **Overview** -In a distributed storage system, there are operations that must be performed on a specific node in the cluster where storage is to be consumed from. -For example, a database pod that requires persistent storage needs to attach and mount a volume backed by the storage cluster on the same node that the pod is scheduled to run on. -In this document, we propose a design for a Rook "agent" that will be deployed in the cluster to run on nodes that have a need to perform operations to consume storage from the cluster. - -## **Background: Flexvolume Issues** -In Kubernetes 1.8, the Kubernetes Storage SIG recommends storage providers to implement out-of-tree plugins to provide persistent storage. -With the full implementation of Container Storage Interface (CSI) several months away from completion, the recommended approach in the meantime is to implement a Flexvolume. -Deployment of a Flexvolume will be improved in 1.8 to make it less of a manual process (e.g., dynamic discovery), as described in the following pull request: https://github.com/kubernetes/community/pull/833 - -However, there are still some limitations to the capabilities of a Flexvolume implementation. -For example, a Flexvolume plugin does not execute in a context that has cluster credentials, so it cannot communicate with the Kubernetes API to perform such operations as creating Custom Resource Definitions (CRDs). -This document will describe how a Rook local node agent would work with the Flexvolume architecture to provide storage in a Kubernetes cluster, in addition to other responsibilities that must be performed on specific nodes in the cluster. - -## **Detailed Design** - -### **Responsibilities** -The Rook agent will have multiple responsibilities beyond just performing storage operations for the Rook Flexvolume driver. -One can think of the agent as a mini operator that functions at the node level. -The initial proposed responsibilities of the agent are: - -1. Deploy the Rook Flexvolume driver to the `volume-plugin-dir` directory on every node -1. Perform storage operations on behalf of the Flexvolume driver, such as attaching, detaching, mounting and unmounting Rook cluster storage -1. Cluster clean up operations, such as forcefully unmapping RBD devices when the Rook cluster is being deleted while there are still pods consuming those volumes -1. Proxy I/O traffic from kernel modules to user space (e.g. Network Block Device (NBD) kernel module to librbd in userspace) - -### **Deployment** -The Rook operator will deploy the Rook agent to all nodes in the cluster via a Daemonset in the same namespace in which the operator is running. -It is a permanent (long running) daemon that has a lifecycle tied to the node that it is scheduled on. 
-The agent deployment will happen when the operator is first created, in the same flow where the operator is declaring CRDs for clusters, pools, etc. -This means that the Rook agents are not associated with a specific Rook cluster and that they will be able to handle operations for any Rook cluster instance. -The Rook operator CRD will be updated to allow selectors to control the set of nodes that the agent is scheduled on, but in most cases it is desirable for it to be running on all nodes. - -Each agent pod will be running the same `rook/rook` container image in use today, but with a new `agent` command (similar to the existing `mon` or `osd` commands). -This image will have all the tools necessary to provision and manage storage (e.g., `rbd`, iSCSI tools and the Flexvolume driver). -When the agent starts up, it will immediately copy the Rook Flexvolume driver from its image to the `volume-plugin-dir` on its host node. -In Kubernetes 1.8+, the [dynamic Flexvolume plugin discovery](https://github.com/kubernetes/community/pull/833) will find and initialize our driver, but in older versions of Kubernetes a manual restart of the Kubelet will be required. - -After the driver has been deployed, the Rook agent will create a Unix domain socket that will serve as the communications channel between the agent and the Flexvolume driver. -The driver will initiate a handshake to the agent over the socket during its [`Init` function](https://github.com/kubernetes/community/blob/master/contributors/devel/flexvolume.md#init) when it is executed after being discovered by the Controller manager. -The agent will wait for the driver to complete the handshake before moving on and being ready to accept Flexvolume storage requests. - -Note that the Rook operator will continue to run its existing [dynamic volume provisioner](https://github.com/rook/rook/tree/master/pkg/operator/provisioner) to provision and delete persistent volumes as needed. - -### **Flexvolume** -The Rook Flexvolume driver will be very lightweight, simply implementing `Mount()` and `Unmount()` from the [required interface](https://github.com/kubernetes/community/blob/master/contributors/devel/flexvolume.md#driver-invocation-model) -and then offloading the storage provider work to the Rook agent over the Unix domain socket. -Note this means that the attach/detach controller and its centralized attaching/detaching will not be used. -Instead, attaching will be performed as part of `Mount()` and detaching as part of `Unmount()`, as described in the next section. -This is important because it means that the Rook Flexvolume driver and Agent do **not** need to be installed on the Kubernetes master node (control plane), which is not possible in some environments such as Google Container Engine (GKE). -This makes this proposed design more portable. - -#### Control Flow Overview -Below is a simplified description of the control flow for providing block storage ready to be consumed by a pod: - -1. Rook operator is running and a Rook agent pod is running on every node in the cluster. The Rook Flexvolume driver has been deployed to the `volume-plugin-dir` on each node. -1. A user creates a Persistent Volume Claim (PVC) specifying a storageclass that uses the `rook.io/block` [provisioner](https://github.com/kubernetes-incubator/external-storage/blob/e6e64ad1a431fea37f723882f36251f8d2fe4247/lib/controller/volume.go#L29) -1. `Provision()` is called on the operator's provisioner to create a block image in the cluster. 
At this point, the Provision phase is complete and the PVC/PV are considered `Bound` together. -1. Once a pod has been created that consumes the PVC, `Mount()` is called by the Kubelet on the Rook Flexvolume on the node that will be consuming the storage, which calls into its local Rook agent via the Unix domain socket. - * `Mount()` is a blocking call by the Kubelet and it will wait while the entire mapping/mounting is performed by the driver and agent synchronously. -1. The agent then creates a volume attach CRD that represents the attachment of the cluster volume to the node. -1. Next, the agent performs the mapping of the volume to a local device on the node and updates the status of the CRD and its device path field (e.g., `/dev/rbd0`). -1. Control is returned to the driver, and if the mapping was successful then the driver will proceed with mounting the new local device to the requested path on the host. -If necessary, the driver will also format the volume according to the filesystem type expressed on the storageclass for the volume. -1. The driver then returns from `Mount()` with a successful result to the Kubelet. - -##### Unmount -During an unmount operation, the [`Unmount()`](https://github.com/kubernetes/community/blob/master/contributors/devel/flexvolume.md#unmount) call is only given the mount dir to unmount from. -With this limited information, it is difficult to ascertain more information about the specific volume attachment that is intended. -Currently, the mount dir has some of this information encoded in the full path. -Take for example the following mount dir: -``` -/var/lib/kubelet/pods/4b859788-9290-11e7-a54f-001c42fe7d2c/volumes/kubernetes.io~rbd/pvc-4b7eab9a-9290-11e7-a54f-001c42fe7d2c -``` - -Given the mount dir above, one can infer that it is for pod `4b859788-9290-11e7-a54f-001c42fe7d2c` and PV `pvc-4b7eab9a-9290-11e7-a54f-001c42fe7d2c`. -The agent will use this information to look up the correct CRD instance, granting itself full context for how to perform the detach and unmount. - -Parsing this particular mount dir format doesn't have any guarantee of stability in future Kubernetes releases, so we cannot rely on this long term. -The ideal solution would be for the Kubelet to pass along full context information to the `Unmount()` call. -This improvement is being tracked in https://github.com/kubernetes/kubernetes/issues/52590. - -#### Usage of CRDs -The control flow described above explained how the agent uses the Kubernetes API to create a volume attach CRD that represents the attachment of a cluster volume to a specific cluster node. -This usage of the Kubernetes API is a reason why the Flexvolume driver, which does not run in a context that has any cluster credentials, is insufficient for this design, and thus why a Rook agent pod is desirable. -Let's enumerate some of the benefits of using a volume attach CRD: - -1. The admin gets a native `kubectl` experience for viewing and getting information on attached volumes in their cluster. -1. The CRD helps provide fencing for the volume in a generalized way, it is not specific to the underlying storage Rook is creating and managing. -The existence of the CRD provides a means of bookkeeping to signal that the volume is locked and in use. -1. In the event that a node dies that had a volume attached, a CRD allows centralized detachment of the volume by the Rook operator. This will be explained in more detail in the [fencing section](#fencing). 
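-
-Referring back to the `Unmount()` discussion above, a minimal sketch of extracting the pod UID and PV name from the mount dir might look like the following (a hypothetical helper, not the actual driver code, and dependent on a path format that is not guaranteed to be stable):
-
-```go
-package flexvolume
-
-import (
-    "fmt"
-    "strings"
-)
-
-// parseMountDir extracts the pod UID and PV name from a mount dir of the form:
-//   /var/lib/kubelet/pods/<pod-uid>/volumes/<plugin>/<pv-name>
-func parseMountDir(mountDir string) (podUID, pvName string, err error) {
-    parts := strings.Split(strings.Trim(mountDir, "/"), "/")
-    // expected: "var", "lib", "kubelet", "pods", <pod-uid>, "volumes", <plugin>, <pv-name>
-    if len(parts) < 8 || parts[3] != "pods" || parts[5] != "volumes" {
-        return "", "", fmt.Errorf("unexpected mount dir format: %s", mountDir)
-    }
-    return parts[4], parts[7], nil
-}
-```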
- -#### Improvements Over Rook's Current Support for Persistent Volumes -Rook currently has a hybrid approach of providing persistent storage in a Kubernetes cluster. While the Rook operator implements a dynamic volume provisioner, the attach and mounting is offloaded to the existing [RBD plugin](https://github.com/kubernetes/kubernetes/tree/master/pkg/volume/rbd). -This requires that the Ceph tools are installed alongside the Kubelet on every node in the cluster, which causes friction in the experience for users of Rook. -Furthermore, if users want to consume a Rook volume outside of the default namespace, then they must manually copy secrets to their namespace of choice. - -Both of these issues are handled by this proposed design for the Rook agent, greatly streamlining the Rook storage experience in Kubernetes and removing the common causes for errors and failure. This is a big win for our users. - -This proposed design also allows Rook to normalize on a single path for providing persistent volumes across all Kubernetes versions that it supports. -Since this design is fully out-of-tree for Kubernetes, it is not tied to Kubernetes release timelines. -Updates, fixes and new features can be released through the normal Rook release process and timelines. - -The experience is also normalized across other distributed storage platforms that Rook will support in the future, such as GlusterFS. -Instead of the user having to create a storageclass specific for the underlying distributed storage (e.g., RBD if Rook is using Ceph), this will abstract that decision away, allowing the user to simply request storage. -This was already true of the hybrid approach in Rook v0.5, but it's worth noting here still. - -#### Volume Attach CRD Details -This section will discuss the volume attach CRD in more detail. -The name (primary identity) of each CRD instance will be the Persistent Volume (PV) name that it is for. -This will enable very efficient look ups of volume attach instances for a particular PV, thereby making fencing checks very efficient. - -Each CRD instance will track all consumers of the volume. -In the case of `ReadWriteOnce` there will only be a single consumer, but for `ReadWriteMany` there can be multiple simultaneous consumers. -There will be an `Attachments` list that captures each instance of pod, node, and mount dir, which can all be used for the fencing checks described in the next section. - -The full schema of the volume attachment CRD is shown below: -```go -type Volumeattachment struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Attachments []Attachment `json:"attachments"` -} - -type Attachment struct { - Node string `json:"node"` - PodNamespace string `json:"podNamespace"` - Pod string `json:"pod"` - MountDir string `json:"mountDir,omitempty"` -} -``` - -#### Fencing -Ensuring either exclusive single client access (`ReadWriteOnce`) or shared multi-pod access (`ReadWriteMany`) to the persistent volume can be achieved by the Rook agent, -independently from the underlying storage protocol that is being used (RBD, NBD, iSCSI, etc.). -Remember that during the attach operation, the Rook agent will create a volume attach CRD. -When the agent performs the attach on its local node, it can create the CRD with information stating that the volume is now locked and who it is locked by. 
-Thus, the CRD itself provides the accounting for the volume's lock in a generalized way, independent of the underlying storage protocol the agent chose to use for the volume.
-Of course, this lock can also be augmented at the lower layer of the specific storage protocol that's being used.
-
-##### **Race conditions**
-While using the volume attach CRD for fencing, it is important to avoid race conditions.
-For example, if two pods are attempting to use the same volume at the same time, we must still ensure that only one of them is granted access while the others fail.
-This can be accomplished thanks to the consistency guarantees that Kubernetes provides for all of its objects, including CRDs, because they are stored in etcd.
-For CRD create operations, an error will be returned if a CRD with that name already exists, and a duplicate CRD will not be created.
-Furthermore, for update operations, an error will be returned if the update specifies an out-of-date version of the object.
-
-##### **ReadWriteOnce**
-For `ReadWriteOnce`, the agent needs to ensure that only one client is accessing the volume at any time.
-During the `Mount()` operation, the agent will look for an existing CRD by its primary key, the PV name.
-If no CRD currently exists, then the agent will create one, signifying that it has won exclusive access to the volume, then proceed with the attach and mount.
-The CRD that the agent creates will contain the pod, node and mount dir of the current attachment.
-
-If a CRD does already exist, the agent will check its existing attachments list.
-If the list specifies that the volume is attached to a different pod than the one we are currently mounting for, then another consumer already has exclusive access to the volume and the agent must honor `ReadWriteOnce` and fail the operation.
-However, if the previous attachment is for the **same** pod and namespace that we are currently mounting for, this means that the volume is being failed over to a new node and was not properly cleaned up on its previous node.
-Therefore, the agent will "break" the old lock by removing the old attachment entry from the list and adding itself, then continue with attaching and mounting as usual (a sketch of this decision appears after the detach discussion below).
-
-##### **ReadWriteMany**
-For `ReadWriteMany`, the agent will allow multiple entries in the attachments list of the CRD. When `Mount()` is called, the agent will either create a new CRD instance if it does not already exist, or simply add a new attachment entry for itself to the existing CRD.
-
-##### **Detach**
-During the detach operation, the CRD will be deleted to signify that the volume has been unlocked (or, in the `ReadWriteMany` case, updated to remove the entry from the attachments list of the CRD).
-
-However, if the node where the volume was attached dies, the agent on that node may not get a chance to perform the detach and update the CRD instance.
-In this case, we will need to clean up the stale record and potentially perform the detach later on if the node returns.
-This will be performed in two separate ways:
-1. As previously mentioned, the agent will check for existing attachments when it is requested to `Attach()`. If it finds that the attachment belongs to a node that no longer exists, it will "break" the lock as described above.
-1. Periodically (once per day), the operator will scan all volume attachment CRD instances, looking for any that belong to a node that no longer exists.
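-
-As a recap of the `ReadWriteOnce` fencing and stale-lock handling described above, here is a minimal sketch of the decision the agent has to make; the type and helper names are illustrative only and are not the actual agent code:
-
-```go
-package agent
-
-// Attachment mirrors the attachment entry from the CRD schema shown earlier.
-type Attachment struct {
-    Node         string
-    PodNamespace string
-    Pod          string
-    MountDir     string
-}
-
-// decideReadWriteOnce sketches the fencing decision for a ReadWriteOnce volume.
-// existing is the attachment already recorded in the CRD (nil if no CRD exists yet),
-// request is the attachment being mounted now, and nodeExists reports whether a
-// cluster node is still present. It returns whether the mount may proceed and
-// whether a stale lock must be broken (replaced) first.
-func decideReadWriteOnce(existing *Attachment, request Attachment, nodeExists func(node string) bool) (allowed, breakLock bool) {
-    if existing == nil {
-        // No CRD yet: creating it wins exclusive access to the volume.
-        return true, false
-    }
-    samePod := existing.Pod == request.Pod && existing.PodNamespace == request.PodNamespace
-    if samePod || !nodeExists(existing.Node) {
-        // Failover of the same pod, or the previous node is gone: break the stale lock.
-        return true, true
-    }
-    // A different, live consumer already holds the lock: honor ReadWriteOnce and fail.
-    return false, false
-}
-```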
-
-In both of the cases listed above (the attach-time check and the operator's periodic scan), when a "stale" attachment record is found, its details will be added to a volume attachment garbage collection list, indexed by node name.
-Upon startup of a Rook agent on a node, as well as periodically, the agent can look at this GC list to see if any entries are for the node it is running on.
-If so, the agent will attempt to detach (if the device still exists) and then remove the entry from the GC list.
-Additionally, if there are "stale" records that are no longer applicable for a given node (e.g., a node went down but then came back up), the agent should clean up those invalid records as well.
-
-#### Security
-The only interface for communicating with and invoking operations on the Rook agent is the Unix domain socket.
-This socket will be readable and writable only by `root`, and it is only accessible on the local node (it is not remotely accessible).
-
-### **Cluster Cleanup**
-Rook makes storage as a service a deeply integrated part of the Kubernetes cluster as opposed to an external entity.
-This integration requires more attention to the lifecycle management of the storage components.
-
-#### Hung RBD Kernel Module
-If the monitor pods of a Rook cluster are no longer accessible while block storage is mapped to a node, the kernel RBD module will hang and require a [power cycle of the machine](https://github.com/rook/rook/issues/376#issuecomment-318803799).
-
-The Rook agent can help mitigate this scenario by watching for the cluster CRD delete event.
-When a Rook cluster is being deleted, there may still be consumers of the storage in the cluster in the form of pods with PVCs.
-When a Rook agent receives a cluster CRD delete event, it will respond by checking for any Rook storage on its local node and then forcefully removing it.
-
-To forcefully remove the storage from local pods, the agent will perform the following sequence of steps for each Rook PVC:
-```bash
-$ kubectl delete pvc $pvc_name
-$ sudo rbd unmap -o force /dev/rbdX
-# wait for the unmap to time out or send SIGINT
-
-$ kubectl delete pv $pv_name
-$ sudo umount $mount_point
-```
-
-As mentioned above, each agent will be watching for events on cluster CRDs (a sketch of such a watch loop appears after the open questions below).
-Even in a large-scale cluster, it is appropriate to have many watchers on the cluster CRDs for a couple of reasons:
-1. Cluster CRD events are relatively rare since they are tied to cluster lifecycles
-1. Each agent truly does need to be informed about cluster CRD events since they all may need to act on them
-
-## **Open Questions**
-1. Reliability: Does the centralized `Detach()` fallback by the operator work reliably when the node consuming the volume has died? We will vet this further while we are testing the implementation under real-world scenarios.
-1. Security: Are there further security considerations that need to be addressed?
-1. Portability: Since this design does not need to run anything on the master node (control plane), it is fairly portable across Kubernetes deployments and environments. However, some environments, such as Google Container Engine, do not have suitable kernel drivers for I/O traffic. Those environments need to be updated with a lowest common denominator kernel driver, such as `NBD`.
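-
-Referring back to the cluster CRD watch described in the Cluster Cleanup section above, a minimal sketch of the agent's watch loop using the dynamic client; the GroupVersionResource and the cleanup hook are assumptions for illustration, not the actual agent code:
-
-```go
-package agent
-
-import (
-    "context"
-    "log"
-
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/client-go/dynamic"
-)
-
-// watchClusterDeletes watches Rook cluster CRs across all namespaces and invokes
-// cleanup when one is deleted. cleanup stands in for the forceful unmap/unmount of
-// any local Rook storage described above.
-func watchClusterDeletes(ctx context.Context, client dynamic.Interface, cleanup func(namespace, name string)) error {
-    // Assumed GVR for the cluster CRD; adjust to the actual group/version in use.
-    gvr := schema.GroupVersionResource{Group: "rook.io", Version: "v1alpha1", Resource: "clusters"}
-    w, err := client.Resource(gvr).Watch(ctx, metav1.ListOptions{})
-    if err != nil {
-        return err
-    }
-    defer w.Stop()
-    for event := range w.ResultChan() {
-        if event.Type != watch.Deleted {
-            continue
-        }
-        obj, ok := event.Object.(metav1.Object)
-        if !ok {
-            continue
-        }
-        log.Printf("cluster %s/%s deleted, cleaning up local storage", obj.GetNamespace(), obj.GetName())
-        cleanup(obj.GetNamespace(), obj.GetName())
-    }
-    return nil
-}
-```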
diff --git a/design/ceph/multus-network.md b/design/ceph/multus-network.md deleted file mode 100644 index ed039cdd0..000000000 --- a/design/ceph/multus-network.md +++ /dev/null @@ -1,134 +0,0 @@ -# Multus networking integration with Ceph (not finalized yet and subject to update) - -We have already explored and explained the benefit of multi-homed networking, so this document will not rehearse that but simply focus on the implementation for the Ceph backend. -If you are interested in learning more about multi-homed networking you can read the [design documentation on that matter](../core/multi-homed-cluster.md). - -To make the story short, [Multus](https://github.com/intel/multus-cni) should allow us to get the same performance benefit as `HostNetworking` by increasing the security. -Using `HostNetworking` results in exposing **all** the network interfaces (the entire stack) of the host inside the container where Multus allows you to pick the one you want. -Also, this removes the need of privileged containers (required for `HostNetworking`). - -## Proposed CRD changed - -We already have a `network` CRD property, which looks like: - -```yaml -network: - provider: - selectors: -``` - -We will expand the `selectors` with the following two hardcoded keys: - -```yaml -selectors: - public: - cluster: -``` - -Each selector represents a [`NetworkAttachmentDefinition`](https://github.com/intel/multus-cni/blob/master/doc/quickstart.md#storing-a-configuration-as-a-custom-resource) object in Multus. -At least, one must be provided and by default they will represent: - -- `public`: data daemon public network (binds to `public_network` Ceph option). -- `cluster`: data daemon cluster network (binds to `cluster_network` Ceph option). - -If only `public` is set then `cluster` will take the value of `public`. - -## Multus supported configuration - -### Interface type - -As part of the CNI spec, Multus supports several [interface types](https://github.com/containernetworking/plugins#main-interface-creating). -Rook will naturally support any of them as they don't fundamentally change the working behavior. - -### IPAM type - -This is where things get more complex. -Currently there are [three different IPAM](https://github.com/containernetworking/plugins#ipam-ip-address-allocation) solutions available. - -As part of our research we have found that the following IPAM types are not good candidates: - -- host-local: Maintains a local database of allocated IPs, this only works on a per host basis, so not suitable for a distributed environment since we will end up with IP collision -To fix this, the [whereabouts project](https://github.com/dougbtv/whereabouts) looks promising but is not officially supported. -- static: Allocate a static IPv4/IPv6 addresses to container and it's useful in debugging purpose. -This cannot at scale because this means we will have to allocate IPs for **all** the daemon so it's not scalable. - -You can find a more detailed analysis at the end of the document in the [rejected proposal section](#rejected-proposals). - -## Ceph daemons implementation challenges - -### Monitors and OSDs - -The Ceph monitors only need access to the public network. -The OSDs needs access to both public and cluster networks. - -Monitors requirements so far are the following: - -- Predictable IP addresses -- Keep the same IP address for the life time of the deployment (IP should survive a restart) - -### RGW implementation - -Only need access to the public network. 
-They use a service IP with a load balancer, so we need to be careful.
-
-### MDS/RBD-MIRROR/NFS implementation
-
-These daemons only need access to the public network.
-There is nothing to do in particular since they don't use any service IPs.
-
-### CSI pods
-
-We can add annotations to these pods so they can reach the Ceph public network; the driver will then expose the block device or the filesystem normally.
-
-## Accepted proposal
-
-So far, the team has decided to go with the [whereabouts](https://github.com/dougbtv/whereabouts) IPAM.
-It's an IP Address Management (IPAM) CNI plugin that assigns IP addresses cluster-wide.
-It behaves much like the host-local CNI plugin, but it works across all the nodes in the cluster (host-local only knows how to assign IPs to pods on the same node).
-Whereabouts can be used for both IPv4 and IPv6 addressing.
-
-It is under active development and not yet ready, but ultimately it will allow us to:
-
-- have static IP addresses distributed across the cluster
-- keep those IPs across any deployment restart
-- delegate the allocation to whereabouts instead of Rook
-
-/!\ The only thing that is not solved yet is how we can predict the IPs of the upcoming monitors.
-We might need to rework the way we bootstrap the monitors a little so that we do not need to know the IP in advance.
-
-## Rejected proposals
-
-The following proposals were rejected, but we keep them here for traceability and knowledge.
-
-### IPAM type 'DHCP'
-
-In this scenario, a DHCP server will distribute an IP address to a pod using a given range.
-
-Pros:
-
-- Pods will get a dedicated IP on a physical network interface
-- No changes required in Ceph; Rook will detect the CIDR via the `NetworkAttachmentDefinition` and then populate Ceph's `public_network` and `cluster_network` flags
-
-Cons:
-
-- IP allocation is not predictable; we don't know it until the pod is up and running.
-So the detection must happen inside the running monitor container, similarly to what the OSD code does today.
-- Requires drastic changes in the monitor bootstrap code
-- This adds a DHCP daemon to every part of the cluster, and this has proven to be troublesome
-
-Assuming we go with this solution, we might need to change the way the monitors are bootstrapped:
-
-1. let the monitor discover its own IP based on an interface
-2. once the first mon is bootstrapped, register its IP in a ConfigMap as well as populating clusterInfo
-3. boot the second mon and look up the initial member in clusterInfo (if the operator dies in the process, we always `CreateOrLoadClusterInfo()` on each startup based on the ConfigMap, so no worries)
-4. continue in the same way with the rest of the monitors
-
-To be tested: if the pod restarts, it keeps the same IP.
-
-### IPAM type 'DHCP' with service IP
-
-We could, and this is pure theory at this point, use IPAM with DHCP along with a service IP.
-This would require interacting with kube-proxy, and there is no such feature yet.
-Even if it were there, we decided not to go with DHCP, so this is not relevant.
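-
-Looking back at the `selectors` change proposed at the top of this document, the selectors ultimately have to surface as the standard Multus pod annotation (`k8s.v1.cni.cncf.io/networks`). Below is a minimal sketch of that translation, including the defaulting of `cluster` to `public`; the helper is hypothetical and not actual Rook code:
-
-```go
-package main
-
-import (
-    "fmt"
-    "strings"
-)
-
-// multusAnnotation builds the value of the k8s.v1.cni.cncf.io/networks annotation
-// from the CRD selectors. Each selector value names a NetworkAttachmentDefinition.
-// If only "public" is set, "cluster" defaults to the same attachment, as described above.
-func multusAnnotation(selectors map[string]string) (string, error) {
-    public := selectors["public"]
-    if public == "" {
-        return "", fmt.Errorf("at least the public selector must be provided")
-    }
-    cluster := selectors["cluster"]
-    if cluster == "" || cluster == public {
-        return public, nil
-    }
-    // Daemons that need both networks (e.g. OSDs) list both attachments.
-    return strings.Join([]string{public, cluster}, ","), nil
-}
-
-func main() {
-    ann, _ := multusAnnotation(map[string]string{"public": "rook-public-net", "cluster": "rook-cluster-net"})
-    fmt.Println(ann) // rook-public-net,rook-cluster-net
-}
-```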
diff --git a/design/ceph/object/bucketpolicy.md b/design/ceph/object/bucketpolicy.md deleted file mode 100644 index 77ee75721..000000000 --- a/design/ceph/object/bucketpolicy.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: adding bucket policy for ceph object store -target-version: release-1.4 ---- - -# Feature Name -Adding bucket policy support for ceph object store - -## Summary -The bucket policy is the feature in which permissions for specific user can be set on s3 bucket. Read more about it from [ceph documentation](https://docs.ceph.com/docs/master/radosgw/bucketpolicy/) - -Currently [ceph object store](/Documentation/ceph-object.md) can be consumed either via [OBC](/Documentation/ceph-object-bucket-claim.md) and [ceph object user](/Documentation/ceph-object-store-user-crd.md). As of now there is no direct way for ceph object user to access the OBC. The idea behind this feature to allow that functionality via Rook. Refer bucket policy examples from [here](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html). Please note it is different from [IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html). - -## Proposal details - -The following settings are needed to add for defining policies: - - _bucketPolicy_ in the `Spec` section of `CephObjectStoreUser` CR - - _bucketPolicy_ in the `parameters` section of `StorageClass` for `ObjectBucketClaim` - -Policies need to be provided in generic `json` format. A policy can have multiple `statements`. - -Rook must perform the following checks to verify whether the `bucketpolicy` applicable to OBC. -- for ceph object user, `Principal` value should have username and `Resource` should have specific bucket names. It can be defined for buckets which are not part of the OBC as well, the `bucketname` of an OBC can be fetched from its `configmap`. -- In `StorageClass`, `Principal` value should be `*`(applicable to all users) and `Resource` should have the bucket name can be empty since it can be generated name from Rook as well and will be attached before setting the policy. - -Examples: - -```yaml -# storageclass-bucket.yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-delete-bucket -provisioner: rook-ceph.ceph.rook.io/bucket -reclaimPolicy: Delete -parameters: - objectStoreName: my-store - objectStoreNamespace: rook-ceph - region: us-east-1 - bucketName: ceph-bkt - bucketPolicy: "Version": "2012-10-17", - "Statement": [ - { - "Sid": "listobjs", - "Effect": "Allow", - "Principal": {"AWS": ["arn:aws:iam:::*"]}, - "Action": "s3:ListObject", - "Resource": "arn:aws:s3:::/*" - } - ] - -# object-user.yaml -apiVersion: ceph.rook.io/v1 -kind: CephObjectStoreUser -metadata: - name: my-user - namespace: rook-ceph -spec: - store: my-store - displayName: "my display name" - bucketPolicy: "Version": "2012-10-17", - "Statement": [ - { - "Sid": "putobjs" - "Effect": "Allow", - "Principal": {"AWS": ["arn:aws:iam:::my-user"]}, - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::ceph-bkt-1/*" - }, - { - "Sid": "getobjs" - "Effect": "Allow", - "Principal": {"AWS": ["arn:aws:iam:::my-user"]}, - "Action": "s3:GettObject", - "Resource": "arn:aws:s3:::ceph-bkt-2/*" - } - ] -``` -In the above examples, the `bucket policy` mentioned in the `storage class` will be inherited to all the OBCs created from it. And this policy needs to be for the anonymous users(all users in the ceph object store), it will be attached to the bucket during the OBC creation. 
-In the case of the `ceph object store user`, the policy can have multiple statements, each representing a policy for an existing bucket in the `ceph object store` for the user `my-user`. During the creation of the user, the `bucketPolicy` defined in the CRD will be split into its individual bucket policy statements; Rook will then fetch the info for each bucket and, using the credentials of the bucket owner, set the policy via the S3 API.
-The `bucketPolicy` defined on the CRD won't override any existing policies on that bucket; it will just be appended. However, it can still easily be overwritten with an S3 client, since Rook does not have much control at that layer.
-
-## APIs and structural changes
-
-The following field will be added to `ObjectStoreUserSpec`, and this needs to be reflected in the existing APIs for `CephObjectStoreUser`:
-
-```
-type ObjectStoreUserSpec struct {
-    Store string `json:"store,omitempty"`
-    //The display name for the ceph users
-    DisplayName string `json:"displayName,omitempty"`
-+   //The bucket policy for this user
-+   BucketPolicy string `json:"bucketPolicy,omitempty"`
- }
-```
-
-The `bucket policy` feature is already consumed by the brownfield use case of `OBC`, so supporting APIs and structures already exist in [policy.go](/pkg/operator/ceph/object/policy.go). Still, a few more APIs are needed to read the policy from the CRD, validate it, and convert it into a `bucketpolicy` that can be consumed by the existing APIs.
diff --git a/design/ceph/object/ceph-bucket-notification-crd.md b/design/ceph/object/ceph-bucket-notification-crd.md
deleted file mode 100644
index 04d461e49..000000000
--- a/design/ceph/object/ceph-bucket-notification-crd.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# Ceph bucket notifications CRD
-
-## Overview
-
-Ceph has supported the bucket notifications feature since Nautilus. It allows sending messages to various endpoints when a new event occurs on a bucket ([ref](https://docs.ceph.com/docs/master/radosgw/notifications/)).
-
-Setup of those notifications is normally done by sending HTTP requests to the RGW, either to create/delete topics pointing to specific endpoints, or to create/delete bucket notifications based on those topics.
-
-This feature eases that process by avoiding the use of external tools or scripts. Instead, users create CR definitions that contain all the information necessary to create topics and/or notifications, which the rook operator processes.
-
-## Goals
-
-Create a CRD for topics and a CRD for notifications, defining all the required and optional information for the various endpoints.
-
-Extend the rook operator to handle the CRs that are submitted by users.
- -## Implementation - -The CR for a topic configuration takes this form: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephBucketTopic -metadata: - name: # name of the topic - namespace: # namespace where topic belongs -Spec: - endpoint: #(mandatory) URI of an endpoint to send push notification to - opaqueData: #(optional) opaque data is set in the topic configuration - persistent: false #(optional) indication whether notifications to this endpoint are persistent or not (`false` by default) - # Endpoint specific parameters - http: - verifySSL: true #indicate whether the server certificate is validated by the client or not (`true` by default) - amqp: - ackLevel: broker # none/routable/broker, optional (default - broker) - amqpExchange: direct # exchanges must exist and be able to route messages based on topics - kafka: - useSSL: true # secure connection will be used for connecting with the broker (`false` by default) - caLocation: # this specified CA will be used, instead of the default one, to authenticate the broker - ackLevel: broker # none/broker, optional (default - broker) -``` -P.S : Endpoint can be of different format depends on the server -- http -> `http[s]://[: `amqp://[:@][:][/]` -- kafka -> `kafka://[:@][: CephObjectZoneGroup -> CephObjectZone -> CephObjectStore - - -## Inventory of resources that could have dependency relationships -All Rook-Ceph CRDs: -- CephCluster -- CephBlockPool -- CephFilesystem -- CephObjectStore -- CephObjectStoreUser -- CephNFS -- CephClient -- CephRBDMirror -- CephFilesystemMirror -- CephObjectRealm -- CephObjectZoneGroup -- CephObjectZone - -Resources for which Rook-Ceph acts as a driver/provisioner: -- ObjectBucket (lib-bucket-provisioner) -- Bucket (COSI) - -Kubernetes resources which can depend on Rook: -- StorageClass (Kubernetes) -- PersistentVolume (Kubernetes) - - -## Proposed dependency relationships -A graph of proposed dependency relationships is shown below with more detail to follow. -![Graph view of resource dependencies](resource-dependencies.png) - -#### `CephCluster` -A CephCluster does not create pools itself, but the Ceph cluster it represents houses pools, and -users can manually create pools using Ceph tooling outside of Kubernetes manifests. It is useful but -not critical to understand which resources interact with pools and in what ways. - -Dependents which can create/delete pools: -- CephBlockPools in the same namespace -- CephFilesystems in the same namespace -- CephObjectStores in the same namespace -- CephObjectZones in the same namespace - -Dependents which can consume arbitrary pools including user-created pools: -- CephNFSes in the same namespace -- CephClients in the same namespace - -Dependents that do not interact with pools: -- CephRBDMirrors in the same namespace -- CephFilesystemMirrors in the same namespace -- CephObjectRealms in the same namespace -- CephObjectZoneGroups in the same namespace - -It is most safe if the `CephCluster` treats all possible Rook-Ceph CRs besides itself as simple -dependents. If a dependent exists in the same namespace, block deletion. In this way, CephCluster -is the most protected provider resource. It also acts as a root for preserving deletion ordering. 
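-
-A minimal sketch of that "any dependents in the same namespace" check using the dynamic client; the resource list below is a sample of the inventory above, and the helper is illustrative rather than the actual controller code:
-
-```go
-package dependents
-
-import (
-    "context"
-    "fmt"
-
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    "k8s.io/client-go/dynamic"
-)
-
-// cephClusterDependents lists the Rook-Ceph CRs (besides the CephCluster itself) that
-// exist in the cluster namespace and would therefore block deletion.
-func cephClusterDependents(ctx context.Context, client dynamic.Interface, namespace string) ([]string, error) {
-    // Sample of dependent resources; the full inventory is listed earlier in this document.
-    resources := []string{"cephblockpools", "cephfilesystems", "cephobjectstores", "cephnfses", "cephclients"}
-    var dependents []string
-    for _, resource := range resources {
-        gvr := schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: resource}
-        list, err := client.Resource(gvr).Namespace(namespace).List(ctx, metav1.ListOptions{})
-        if err != nil {
-            return nil, fmt.Errorf("failed to list %s: %w", resource, err)
-        }
-        for _, item := range list.Items {
-            dependents = append(dependents, fmt.Sprintf("%s/%s", resource, item.GetName()))
-        }
-    }
-    return dependents, nil
-}
-```
-
-If the returned list is non-empty, the operator would report the blocked deletion to the user (see the status condition and event described later in this document) instead of removing the finalizer.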
- -#### `CephBlockPool` -Dependents which can consume this provider's pools: -- CephNFSes in the same namespace that have `spec.pool == ` -- CephClients in the same namespace that have any `spec.caps` value with the string `pool=` - -Dependents via CSI: -- StorageClasses that have: - - `provisioner == .rbd.csi.ceph.com` AND - - `parameters.clusterID == ` AND - - `parameters.pool` OR `parameters.dataPool` references a `CephBlockPool` pool -- PersistentVolumes that have: - - `spec.CSI.Driver` == `.rbd.csi.ceph.com` AND - - `spec.CSI.VolumeAttributes["clusterID"] == ` AND - - `spec.CSI.VolumeAttributes["pool"]` OR `spec.CSI.VolumeAttributes["journalPool"]` references a `CephBlockPool` pool - - OR - - `spec.storageClassName == ` - - NOTE: dependents should continue to be ignored if `spec.cleanupPolicy.allowUninstallWithVolumes == true` - -Dependents via FlexVolume: -- Unnecessary because Rook plans to deprecate FlexVolume support for v1.7 - -#### `CephFilesystem` -Dependents which can consume this provider's pools: -- CephNFSes in the same namespace that have `spec.pool == ` -- CephClients in the same namespace that have any `spec.caps` value with the string `pool=` - -Dependents via CSI: -- StorageClasses that have: - - `provisioner == .cephfs.csi.ceph.com` AND - - `parameters.clusterID == ` AND - - `parameters.pool` OR `parameters.dataPool` references a `CephBlockPool` pool -- PersistentVolumes that have: - - `spec.CSI.Driver` == `.cephfs.csi.ceph.com` AND - - `spec.CSI.VolumeAttributes["clusterID"] == ` AND - - `spec.CSI.VolumeAttributes["pool"]` OR `spec.CSI.VolumeAttributes["journalPool"]` references a `CephBlockPool` pool - - OR - - `spec.storageClassName == ` - - NOTE: dependents should continue to be ignored if `spec.cleanupPolicy.allowUninstallWithVolumes == true` - -Dependents via FlexVolume: -- Unnecessary because Rook plans to deprecate FlexVolume support for v1.7 - -#### `CephObjectStore` -Dependents which can consume this provider's pools: -- CephNFSes in the same namespace that have `spec.pool == ` -- CephClients in the same namespace that have any `spec.caps` value with the string `pool=` - -Dependents which reference this provider by name: -- CephObjectStoreUsers in the same namespace that have `spec.store == ` - -Dependents via lib-bucket-provisioner: -- ObjectBucketClaims that have: - - `spec.endpoint.bucketHost == ..svc` AND - - `spec.endpoint.bucketName == ` - -Dependents via COSI: -- Buckets with a reference to a provider's service and bucket (specific paths TBD by COSI design) - -#### `CephObjectZone`, `CephObjectZoneGroup`, and `CephObjectRealm` -These resources are all part of Rook-Ceph's multi-site object storage strategy. - -`CephObjectZone` creates pools. A zone can be effectively thought of as the "object store" itself. 
-Dependents which can consume this provider's pools: -- CephObjectStores in the same namespace that have `spec.zone.name == CephObjectZone.metadata.name` -- CephNFS in the same namespace that have `spec.pool = ` -- CephClient in the same namespace that has an `spec.caps` value with the string `pool=` - -`CephObjectRealm` has dependents: -- CephObjectZoneGroups in the same namespace that have `spec.realm == CephObjectRealm.metadata.name` - -`CephObjectZoneGroup` has dependents: -- CephObjectZones in the same namespace that have `spec.zoneGroup == CephObjectZoneGroup.metadata.name` - -#### Mirroring resources -CephRBDMirror has dependents: -- CephBlockPools in the same namespace that have `spec.mirroring.enabled == true` - -CephFilesystemMirror has dependents: -- CephFilesystems in the same namespace that have `spec.mirroring.enabled == true` - - -## Analysis and discussion -### Reusable patterns -We can identify some common metrics for determining whether a resource is a dependent of a given -"provider" resource. Not all metrics are always applicable, but each these metrics appear more than -once. It should be possible to design reusable patterns/methods for reusing logic. -- Is a dependent in the same namespace as the provider? -- Does a dependent reference the current Rook-Ceph operator? -- Does a dependent reference the Ceph cluster namespace of the provider? -- Does the dependent reference pools created by provider? -- Does the dependent reference the provider by name? (e.g., CephObjectZone references CephObjectZoneGroup) - -### User feedback -It will be important for the user to understand why resources are not being deleted if Rook is -blocking the deletion. This design proposes that the Rook operator report to the user when it is -blocking deletion of a resource due to dependents in two ways: -- report it in a status condition of the resource object being deleted -- report a Kubernetes Event ([reference](https://kubernetes.io/blog/2018/01/reporting-errors-using-kubernetes-events/)) - -Using both of these methods will maximize user visibility. - -**Status:** -Reported statuses will be modified as follows: -1. Object's status.phase should be changed to "Deleting" as soon as a deletion timestamp is detected - and never changed -1. A status.condition should be added if the operator is blocking deletion: - - Type: "DeletionIsBlocked" - - Status: "True" - - Reason: "ObjectHasDependents" - - Message: "object deletion is blocked because it has dependents:" followed by a full list of - which dependents exist of which Kubernetes Object Kinds (e.g., CephBlockPools or - PersistentVolumes) or of which Ceph kinds (e.g., pools or buckets). - -**Event:** -Reported events will have the following content: -- Type: "Warning" -- Reason: "ReconcileFailed" -- Message: "object deletion is blocked because it has dependents:" followed by a full list of which - dependents exist of which Kubernetes Object Kinds (e.g., CephBlockPools or PersistentVolumes) or - of which Ceph kinds (e.g., pools or buckets). - -### Detecting PersistentVolume dependents -Rook currently inspects Kubernetes PersistentVolume (PV) resources when deleting CephClusters. This -provides protection from deleting the backing Ceph cluster when user applications are using it. - -With the changes proposed here, it would be more specific to block deleting CephBlockPool resources -when there are PVs referencing the specific CephBlockPool. 
Similarly, it would be more specific to -block deleting CephFilesystem resources when there are PVs referencing the specific CephFilesystem. - -Kubernetes APIs are quite stable, and it is unlikely that the methods Rook uses to inspect -PersistentVolumes will require changes with any regularity. If changes are necessary, Kubernetes -will likely give well over a year of time to migrate away from deprecated API elements. - -Removing a Ceph cluster that is hosting PVs in use by Kubernetes applications could be disastrous -for users. Therefore, this document proposes to continue checking for PersistentVolumes that depend -on Ceph storage. The document further proposes to increase protection of user data by detecting -dependencies for specific CephBlockPools and CephFilesystems when they are deleted rather than -checking when the CephCluster is deleted. Detecting the existence of PVs when the CephCluster is -deleted then becomes redundant and should be removed. - -### Detecting StorageClass dependents -As a note, StorageClasses (SCs) are only used during initial creation of a -PersistentVolume based on the StorageClass. - -It may not be a good idea to block deletion when there are StorageClasses that reference Rook-Ceph -block/file storage. An admin may at times wish to leave StorageClasses -(a more user-focused API point) and replace the Rook-Ceph resources providing the storage -represented by the SC without disturbing users' ability to reference the SC. Any user that tried to -use the StorageClass while the cluster was down would merely fail until a replacement cluster came -online. - -This document outlines the steps needed to treat StorageClasses as a dependent but proposes not to -implement the dependency at this time. If we get more information in the future that provides a -compelling use-case for treating StorageClasses as dependencies, this functionality can be -implemented at that time. - -### Detecting object bucket dependents without querying lib-bucket-provisioner or COSI resources -Detecting lib-bucket-provisioner ObjectBuckets that are dependent on a given CephObjectStore -requires inspecting ObjectBuckets for a reference to a bucket found in the object store as well as a -reference to the address of the Kubernetes Service created for access to RGWs -(`..svc`). Detecting COSI Buckets will be similar. - -This detection does require accessing external APIs (lib-bucket-provisioner and COSI). This is -non-ideal for COSI whose APIs will be progressing from v1alpha1, through beta stages, and then into -v1 in the coming months/years. Rook can merely check for the existence of buckets in order to -support lib-bucket-provisioner and COSI simultaneously with the same code. This would also remove -any need for Rook to update its COSI API for dependency checking though it will still need to -update its API to continue operating as a COSI driver. This would allow Rook to block deletion of -CephObjectStores without care for who has "claimed" a bucket. - -Given that a Kubernetes cluster might have many hundreds of ObjectBuckets, or COSI Buckets, having a -simpler way of querying for dependents can lighten the load on the Kubernetes API server and reduce -Rook's resource usage. - -Since OBs and COSI Buckets both result in buckets being created in a CephObjectStore, Rook can query -the object store and block deletion if buckets exist. - -This is an elegant solution for blocking when buckets are claimed by these outside resources. 
It -does mean that CephObjectStores that have had buckets created in them directly (by admins or users) -will block until the buckets are manually deleted. An admin may need to request that users remove -unneeded buckets or may instead remove the buckets themselves. - -If the admin wishes to preserve the CephObjectStore's pools on deletion along with their data, the -admin may merely remove the finalizer on the CephObjectStore. - -While this requires more steps to delete the CephObjectStore, it provides additional safety for user -data by requiring users or admins to inspect and clean up unneeded buckets. - -The main downside to this approach is that Rook will not be able to report if there are specific -OBs or COSI Buckets consuming storage from the CephObjectStore. An admin would need to examine the -resources in their cluster to determine if there are claims to the storage manually. A diligent -admin will likely have done this work beforehand. - -This document proposes to implement this strategy to avoid reliance on external APIs. Rook -developers should revisit the decision at a later date to discuss whether the strategy is continuing -to adequately meet users' needs and whether the drawbacks noted are causing any issues. - -### Implementation in stages -This design will result in changes to every major Rook-Ceph controller. However, it should be quite -easy to tackle these changes in stages so that changes can be more easily implemented and reviewed. - -Stages by priority: -1. Block deletion of a CephCluster when there are other Rook-Ceph resources in the same namespace -1. Block deletion of a CephObjectStore when there is a user bucket present in the store. - - This already has an implementation, but it blocks for unrelated OBs -1. Block deletion of a CephBlockPool, CephFilesystem, CephObjectStore, or CephObjectZone when a - CephNFS uses one of its pools. -1. Block deletion of a CephBlockPool or CephFilesystem when there is a PersistentVolume reliant on it. -1. Block deletion of a CephObjectStore when there is a CephObjectStoreUser reliant on it. -1. Block deletion of a CephRBDMirror if a CephBlockPool has mirroring enabled, AND - Block deletion of a CephFilesystemMirror if a CephFilesystem has mirroring enabled. - - These are such similar checks that they should be simple to do at the same time. -1. Block deletion of object multisite resources with their hierarchy: - - CephObjectRealm -> CephObjectZoneGroup -> CephObjectZone -> CephObjectStore -1. Block deletion of a CephBlockPool, CephFilesystem, CephObjectStore, or CephObjectZone when a CephClient uses one of its pools. - - CephClients are seldom used, and few users will be affected if this is not in place. - -### Future work -Immediate feedback is always more helpful to users when possible. It should be possible to implement -dependency checks in a validating admission controller quite easily, but we don't currently know if -the controller is able to validate delete requests. This design leaves this investigation for the -future, but implementation of the design can strive to make dependency checking code easily reusable -for the admission controller when work on that might begin. 
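-
-The PersistentVolume check described for CephBlockPool above maps fairly directly onto the core API. A hedged sketch (illustrative only, assuming the CSI driver name is the operator namespace followed by `.rbd.csi.ceph.com` and that `clusterID` matches the cluster namespace):
-
-```go
-package dependents
-
-import (
-    "context"
-    "fmt"
-
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/client-go/kubernetes"
-)
-
-// blockPoolPVDependents returns the names of PersistentVolumes that appear to be backed
-// by the given CephBlockPool pool, using the criteria listed in the CephBlockPool section.
-func blockPoolPVDependents(ctx context.Context, clientset kubernetes.Interface, operatorNamespace, clusterNamespace, pool string) ([]string, error) {
-    pvs, err := clientset.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
-    if err != nil {
-        return nil, fmt.Errorf("failed to list PersistentVolumes: %w", err)
-    }
-    driver := fmt.Sprintf("%s.rbd.csi.ceph.com", operatorNamespace)
-    var dependents []string
-    for _, pv := range pvs.Items {
-        csi := pv.Spec.CSI
-        if csi == nil || csi.Driver != driver {
-            continue
-        }
-        if csi.VolumeAttributes["clusterID"] != clusterNamespace {
-            continue
-        }
-        if csi.VolumeAttributes["pool"] == pool || csi.VolumeAttributes["journalPool"] == pool {
-            dependents = append(dependents, pv.Name)
-        }
-    }
-    return dependents, nil
-}
-```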
diff --git a/design/ceph/resource-dependencies.png b/design/ceph/resource-dependencies.png deleted file mode 100644 index fec032d61..000000000 Binary files a/design/ceph/resource-dependencies.png and /dev/null differ diff --git a/design/ceph/rook-ceph-status-conditions.md b/design/ceph/rook-ceph-status-conditions.md deleted file mode 100644 index ba76e9726..000000000 --- a/design/ceph/rook-ceph-status-conditions.md +++ /dev/null @@ -1,150 +0,0 @@ -# Rook Ceph Operator Status Conditions - -Reference: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md - -## Background - -Currently, Ceph Cluster stores the state of the system in `Status.State`. But, we want to implement the usage of `Status.Conditions` instead of using `Status.State`. The usage of `Status.Phase` is deprecated over time because it contradicts the system design principle and hampered evolution. So rather than encouraging clients to infer the implicit properties from phases, the usage of `Status.Condition` is preferred. Conditions are more extensible since the addition of new conditions doesn't invalidate decisions based on existing conditions, and are also better suited to reflect conditions that may toggle back and forth and/or that may not be mutually exclusive. - -## Conditions - -Conditions simply represent the latest available observation of an object's state. They are an extension mechanism intended to be used when the details of observation are not a priori known or would not apply to all instances of a given Kind. Objects can have multiple Conditions, and new types of Conditions can also be added in the future by the third-party controllers. Thus, Conditions are thereby represented using list/slice, where each having the similar structure. - -## System States for rook-ceph - -The necessary system states for the rook-ceph can be portrayed as follows: - - Ignored : If any of the resources gets ignored for multiple reasons - Progressing : Marks the start of reconcile of Ceph Cluster - Ready : When Reconcile completes successfully - Not Ready : Either when cluster is Updated or Updating is blocked - Connecting : When the Ceph Cluster is in the state of Connecting - Connected : When the Ceph Cluster gets connected - Available : The Ceph Cluster is healthy and is ready to use - Failure : If any failure occurs in the Ceph Cluster - Cluster Expanding : If the Cluster is Expanding - Upgrading : When the Cluster gets an Upgrade - -## Implementation Details - -Reference: https://github.com/openshift/custom-resource-status/: - -The `Status` of the Condition can be toggled between True or False according to the state of the cluster which it goes through. This can be shown to the user in the clusterCR with along with the information about the Conditions like the `Reason`, `Message` etc. Also a readable status, which basically states the final condition of the cluster along with the Message, which gives out some detail about the Condition like whether the Cluster is 'ReadytoUse' or if there is an Update available, we can update the MESSAGE as 'UpdateAvailable'. This could make it more understandable of the state of cluster to the user. Also, a Condition which states that the Cluster is undergoing an Upgrading can be added. Cluster Upgrade happens when there is a new version is available and changes the current Cluster CR. This will help the user to know the status of the Cluster Upgrade in progress. 
- - NAME DATADIRHOSTPATH MONCOUNT AGE CONDITION MESSAGE HEALTH - rook-ceph /var/lib/rook 3 114s Available ReadyToUse HEALTH_OK - - -We can add Conditions simply in the Custom Resource struct as: - - type ClusterStatus struct{ - FinalCondition ConditionType `json:"finalcondition,omitempty"` - Message string `json:"message,omitmepty"` - Condition []RookConditions `json:"conditions,omitempty"` - CephStatus *CephStatus `json:"ceph,omitempty"` - } - -After that we can just make changes inside rook ceph codebase as necessary. The `setStatusCondition()` field will be fed with the `newCondition` variable which holds the entries for the new Conditions. The `FindStatusCondition` will return the Condition if it is having the same `ConditionType` as the `newCondition` otherwise, it will return `nil`. If `nil` is returned then `LastHeartbeatTime` and `LastTransitionTime` is updated and gets appended to the `Condition`. The `Condition.Status` gets updated if the value is different from the `existingCondition.Status`. Rest of the fields of the `Status.Condition` are also updated. The `FinalCondition` will be holding the final condition the cluster is in. This will be displayed into the readable status along with a message, which is an extra useful information for the users. - - -The definition of the type Conditions can have the following details: - - Type RookConditionType `json:"type" description:"type of Rook condition"` - Status ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` - Reason *string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` - Message *string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` - LastHeartbeatTime *unversioned.Time `json:"lastHeartbeatTime,omitempty" description:"last time we got an update on a given condition"` - LastTransitionTime *unversioned.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transition from one status to another"` - -The fields `Reason`, `Message`, `LastHeartbeatTime`, `LastTransitionTime` are optional field. Though the use of `Reason` field is encouraged. - -Condition Types field specifies the current state of the system. Condition status values may be `True`, `False`, or `Unknown`. The absence of a condition should be interpreted the same as Unknown. How controllers handle Unknown depends on the Condition in question. -`Reason` is intended to be a one-word, CamelCase representation of the category of cause of the current status, and `Message` is intended to be a human-readable phrase or sentence, which may contain specific details of the individual occurrence. `Reason` is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes, whereas `Message` is intended to be presented to users in detailed status explanations, such as `kubectl describe output`. - -In the CephClusterStatus, we can either remove the `Status.State` and `Status.Message` fields and call the `Conditions` structure from inside the `ClusterStatus`, or we can just add the `Conditions` structure by keeping the already included fields.The first method is preferred because the `Conditions` structure contains the `Conditions.Type` and `Conditions.Message` which is similar to the `Status.State` and `Status.Message`. According to the above changes, necessary changes are to be made everywhere `ClusterStatus` or one of its fields are referred. 
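-
-A minimal sketch of the `FindStatusCondition`/`setStatusCondition` behavior described above; the types are simplified (no pointer fields) and the helpers are illustrative rather than the referenced openshift/custom-resource-status implementation:
-
-```go
-package status
-
-import (
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// RookCondition is a simplified form of the condition type described above.
-type RookCondition struct {
-    Type               string
-    Status             string // "True", "False" or "Unknown"
-    Reason             string
-    Message            string
-    LastHeartbeatTime  metav1.Time
-    LastTransitionTime metav1.Time
-}
-
-// findStatusCondition returns the condition with the given type, or nil if none exists.
-func findStatusCondition(conditions []RookCondition, condType string) *RookCondition {
-    for i := range conditions {
-        if conditions[i].Type == condType {
-            return &conditions[i]
-        }
-    }
-    return nil
-}
-
-// setStatusCondition appends newCondition if no condition of that type exists yet;
-// otherwise it updates the existing entry, bumping LastTransitionTime only when the
-// Status value actually changes, and refreshing LastHeartbeatTime on every update.
-func setStatusCondition(conditions *[]RookCondition, newCondition RookCondition) {
-    now := metav1.Now()
-    existing := findStatusCondition(*conditions, newCondition.Type)
-    if existing == nil {
-        newCondition.LastHeartbeatTime = now
-        newCondition.LastTransitionTime = now
-        *conditions = append(*conditions, newCondition)
-        return
-    }
-    if existing.Status != newCondition.Status {
-        existing.Status = newCondition.Status
-        existing.LastTransitionTime = now
-    }
-    existing.Reason = newCondition.Reason
-    existing.Message = newCondition.Message
-    existing.LastHeartbeatTime = now
-}
-```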
- - - -### Examples - -Consider a cluster is being created. The RookConditions is an array that can store multiple Conditions. So the progression of the cluster being created can be seen in the RookConditions as shown in the example below. The Ceph Cluster gets created after it establishes a successful Connection. The `RookCondition` will show in the slice that the `Connecting` Condition will be in `Condition.Status` False. The `Connected` and `Progressing` Types will be set to True. - - Before: - ClusterStatus{ - State : Creating, - Message : The Cluster is getting created, - } - After: - ClusterStatus{ - RookConditions{ - { - Type : Connecting, - Status : False, - Reason : ClusterConnecting, - Message : The Cluster is Connecting, - }, - { - Type : Connected, - Status : True, - Reason : ClusterConnected, - Message : The Cluster is Connected, - }, - { - Type : Progressing, - Status : True, - Reason : ClusterCreating, - Message : The Cluster is getting created, - }, - }, - } -When a Cluster is getting updated, the `NotReady` Condition will be set to `True` and the `Ready` Condition will be set to `False`. - - - Before: - ClusterStatus{ - State : Updating, - Message : The Cluster is getting updated, - } - After: - ClusterStatus{ - RookConditions{ - { - Type : Connecting, - Status : False, - Reason : ClusterConnecting, - Message : The Cluster is Connecting, - }, - { - Type : Connected, - Status : True, - Reason : ClusterConnected, - Message : The Cluster is Connected, - }, - { - Type : Progressing, - Status : False, - Reason : ClusterCreating, - Message : The Cluster is getting created, - }, - { - Type : Ready, - Status : False, - Reason : ClusterReady, - Message : The Cluster is ready, - }, - { - Type : Available, - Status : True, - Reason : ClusterAvailable, - Message : The Cluster is healthy and available to use, - }, - { - Type : NotReady, - Status : True, - Reason : ClusterUpdating, - Message : The Cluster is getting Updated, - }, - }, - } - -In the examples mentioned above, the `LastTransitionTime` and `LastHeartbeatTime` is not added. These fields will also be included in the actual implementation and works in way such that when there is any change in the `Condition.Status` of a Condition, then the `LastTransitionTime` of that particular `Condition` will gets updated. For eg. in the second example indicated above, the `Condition.Status` of the `Condition` is shifted from `True` to `False` while cluster is Updating. So the `LastTranisitionTime` will gets updated when the shifting happens. `LastHeartbeatTime` gets updated whenever the `Condition` is getting updated. diff --git a/design/ceph/security-model.md b/design/ceph/security-model.md deleted file mode 100644 index 7c5a5fe8e..000000000 --- a/design/ceph/security-model.md +++ /dev/null @@ -1,355 +0,0 @@ -# Security Model - -The Rook operator currently uses a highly privileged service account with permissions to create namespaces, roles, role bindings, etc. Our approach would not pass a security audit and this design explores an improvement to this. Furthermore given our use of multiple service accounts and namespace, setting policies and quotas is harder than it needs to be. 
- -## Goals - * Reduce the number of service accounts and privileges used by Rook - * Reduce the number of namespaces that are used by Rook - * Only use services accounts and namespaces used by the cluster admin -- this enables them to set security policies and quotas that rook adheres to - * Continue to support a least privileged model - -## What we do today - -Today the cluster admin creates the rook system namespace, rook-operator service account and RBAC rules as follows: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: rook-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-operator - namespace: rook-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-operator -rules: -- apiGroups: [""] - resources: ["namespaces", "serviceaccounts", "secrets", "pods", "services", "nodes", "nodes/proxy", "configmaps", "events", "persistenvolumes", "persistentvolumeclaims"] - verbs: [ "get", "list", "watch", "patch", "create", "update", "delete" ] -- apiGroups: ["extensions"] - resources: ["thirdpartyresources", "deployments", "daemonsets", "replicasets"] - verbs: [ "get", "list", "watch", "create", "delete" ] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: [ "get", "list", "watch", "create", "delete" ] -- apiGroups: ["rbac.authorization.k8s.io"] - resources: ["clusterroles", "clusterrolebindings", "roles", "rolebindings"] - verbs: [ "get", "list", "watch", "create", "update", "delete" ] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: [ "get", "list", "watch", "delete" ] -- apiGroups: ["rook.io"] - resources: ["*"] - verbs: [ "*" ] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-operator -subjects: -- kind: ServiceAccount - name: rook-operator - namespace: rook-system -``` - -`rook-operator` is a highly privileged service account with cluster wide scope. It likely has more privileges than is currently needed, for example, the operator does not create namespaces today. Note the name `rook-system` and `rook-operator` are not important and can be set to anything. - -Once the rook operator is up and running it will automatically create the service account for the rook agent and the following RBAC rules: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-agent - namespace: rook-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-agent -rules: -- apiGroups: [""] - resources: ["pods", "secrets", "configmaps", "persistenvolumes", "nodes", "nodes/proxy"] - verbs: [ "get", "list" ] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: [ "get" ] -- apiGroups: ["rook.io"] - resources: ["volumeattachment"] - verbs: [ "get", "list", "watch", "create", "update" ] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-agent -subjects: -- kind: ServiceAccount - name: rook-ceph-agent - namespace: rook-system -``` - -When the cluster admin create a new Rook cluster they do so by adding a namespace and the rook cluster spec: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: mycluster ---- -apiVersion: rook.io/v1alpha1 -kind: Cluster -metadata: - name: myrookcluster - namespace: mycluster - ... 
-``` - -At this point the rook operator will notice that a new rook cluster CRD showed up and proceeds to create a service account for the `rook-api` and `rook-ceph-osd`. It will also use the `default` service account in the `mycluster` namespace for some pods. - -The `rook-api` service account and RBAC rules are as follows: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-api - namespace: mycluster ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-api - namespace: mycluster -rules: -- apiGroups: [""] - resources: ["namespaces", "secrets", "pods", "services", "nodes", "configmaps", "events"] - verbs: [ "get", "list", "watch", "create", "update" ] -- apiGroups: ["extensions"] - resources: ["thirdpartyresources", "deployments", "daemonsets", "replicasets"] - verbs: [ "get", "list", "create", "update" ] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: [ "get", "list" ] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: [ "get", "list", "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-api - namespace: mycluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-api -subjects: -- kind: ServiceAccount - name: rook-api - namespace: mycluster -``` - -The `rook-ceph-osd` service account and RBAC rules are as follows: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-osd - namespace: mycluster ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd - namespace: mycluster -rules: -- apiGroups: [""] - resources: ["configmaps"] - verbs: [ "get", "list", "watch", "create", "update", "delete" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd - namespace: mycluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-osd -subjects: -- kind: ServiceAccount - name: rook-ceph-osd - namespace: mycluster -``` - -## Proposed Changes - -Just as we do today the cluster admin is responsible for creating the `rook-system` namespace. I propose we have a single service account in this namespace and call it `rook-system` by default. The names used are inconsequential and can be set to something different by the cluster admin. - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: rook-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-system - namespace: rook-system -``` - -The `rook-system` service account is responsible for launching all pods, services, daemonsets, etc. for Rook and should have enough privilege to do and nothing more. I've not audited all the RBAC rules but a good tool to do is [here](https://github.com/liggitt/audit2rbac). 
For example: - -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-system -rules: -- apiGroups: [""] - resources: ["pods", "services", "configmaps"] - verbs: [ "get", "list", "watch", "patch", "create", "update, "delete" ] -- apiGroups: ["extensions"] - resources: ["deployments", "daemonsets", "replicasets"] - verbs: [ "get", "list", "watch", "patch", "create", "update, "delete" ] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: [ "get", "list", "watch", "patch", "create", "update, "delete" ] -- apiGroups: ["rook.io"] - resources: ["*"] - verbs: [ "*" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-system - namespace: rook-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-system -subjects: -- kind: ServiceAccount - name: rook-system - namespace: rook-system -``` - -Notably absent here are privileges to set other RBAC rules and create read cluster-wide secrets and other resources. Because the admin created the `rook-system` namespace and service account they are free to set policies on them using PSP or namespace quotas. Similar to the ones defined [here](https://github.com/rook/rook/blob/master/Documentation/kubernetes.md#rbac-for-podsecuritypolicies)). - -Also note that while we use a `ClusterRole` for rook-system we only use a `RoleBinding` to grant it access to the `rook-system` namespace. It does not have cluster-wide privileges. - -When creating a Rook cluster the cluster admin will continue to define the namespace and cluster CRD as follows: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: mycluster ---- -apiVersion: rook.io/v1alpha1 -kind: Cluster -metadata: - name: myrookcluster - namespace: mycluster - ... -``` - -In addition we will require that the cluster-admin define a service account and role binding as follows: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-cluster - namespace: mycluster ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-cluster - namespace: mycluster -rules: -- apiGroups: [""] - resources: ["configmaps"] - verbs: [ "get", "list", "watch", "create", "update", "delete" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-cluster - namespace: mycluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-system -subjects: -- kind: ServiceAccount - name: rook-system - namespace: rook-system ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-system - namespace: mycluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-cluster - namespace: rook-cluster -subjects: -- kind: ServiceAccount - name: rook-cluster - namespace: mycluster -``` - -This will grant the `rook-system` service account access to the new namespace and also setup a least privileged service account `rook-cluster` to be used for pods in this namespace that need K8S api access. - -With this approach `rook-system` will only have access to namespaces nominated by the cluster admin. Also we will no longer create any service accounts or namespaces enabling admins to set stable policies and quotas. - -Also all rook pods except the rook operator pod should run using `rook-cluster` service account in the namespace they're in. - -### Supporting common namespaces - -Finally, we should support running multiple rook clusters in the same namespaces. 
While namespaces are a great organizational unit for pods etc. they are also a unit of policy and quotas. While we can force the cluster admin to go to an approach where they need to manage multiple namespaces, we would be better off if we give the option to cluster admin decide how they use namespace. - -For example, it should be possible to run rook-operator, rook-agent, and multiple independent rook clusters in a single namespace. This is going to require setting a prefix for pod names and other resources that could collide. - -The following should be possible: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: myrook ---- -apiVersion: rook.io/v1alpha1 -kind: Cluster -metadata: - name: red - namespace: mycluster - ... ---- -apiVersion: rook.io/v1alpha1 -kind: Cluster -metadata: - name: blue - namespace: mycluster - ... -``` diff --git a/design/ceph/storage-class-device-set.md b/design/ceph/storage-class-device-set.md deleted file mode 100644 index faae67e98..000000000 --- a/design/ceph/storage-class-device-set.md +++ /dev/null @@ -1,504 +0,0 @@ -# Rook StorageClassDeviceSets - -**Target version**: Rook 1.1 - -## Background - -The primary motivation for this feature is to take advantage of the mobility of -storage in cloud-based environments by defining storage based on sets of devices -consumed as block-mode PVCs. In environments like AWS you can request remote -storage that can be dynamically provisioned and attached to any node in a given -Availability Zone (AZ). The design can also accommodate non-cloud environments -through the use of local PVs. - -## StorageClassDeviceSet struct - -```go -struct StorageClassDeviceSet { - Name string // A unique identifier for the set - Count int // Number of devices in this set - Resources v1.ResourceRequirements // Requests/limits for the devices - Placement rook.Placement // Placement constraints for the devices - Config map[string]string // Provider-specific device configuration - volumeClaimTemplates []v1.PersistentVolumeClaim // List of PVC templates for the underlying storage devices -} -``` - -A provider will be able to use the `StorageClassDeviceSet` struct to describe -the properties of a particular set of `StorageClassDevices`. In this design, the -notion of a "`StorageClassDevice`" is an abstract concept, separate from -underlying storage devices. There are three main aspects to this abstraction: - -1. A `StorageClassDevice` is both storage and the software required to make the -storage available and manage it in the cluster. As such, the struct takes into -account the resources required to run the associated software, if any. -1. A single `StorageClassDevice` could be comprised of multiple underlying -storage devices, specified by having more than one item in the -`volumeClaimTemplates` field. -1. Since any storage devices that are part of a `StorageClassDevice` will be -represented by block-mode PVCs, they will need to be associated with a Pod so -that they can be attached to cluster nodes. - -A `StorageClassDeviceSet` will have the following fields associated with it: - -* **name**: A name for the set. **[required]** -* **count**: The number of devices in the set. **[required]** -* **resources**: The CPU and RAM requests/limits for the devices. Default is no - resource requests. -* **placement**: The placement criteria for the devices. Default is no - placement criteria. -* **config**: Granular device configuration. This is a generic - `map[string]string` to allow for provider-specific configuration. 
-* **volumeClaimTemplates**: A list of PVC templates to use for provisioning the - underlying storage devices. - -An entry in `volumeClaimTemplates` must specify the following fields: - * **resources.requests.storage**: The desired capacity for the underlying - storage devices. - * **storageClassName**: The StorageClass to provision PVCs from. Default would - be to use the cluster-default StorageClass. - -## Example Workflow: rook-ceph OSDs - -The CephCluster CRD could be extended to include a new field: -`spec.storage.StorageClassDeviceSets`, which would be a list of one or more -`StorageClassDeviceSets`. If elements exist in this list, the CephCluster -controller would then create enough PVCs to match each `StorageClassDeviceSet`'s -`Count` field, attach them to individual OsdPrepare Jobs, then attach them to -OSD Pods once the Jobs are completed. For the initial implementation, only one -entry in `volumeClaimTemplates` would be supported, if only to tighten the scope -for an MVP. - -The PVCs would be provisioned against a configured or default StorageClass. It -is recommended that the admin setup a StorageClass with `volumeBindingMode: -WaitForFirstConsumer` set. - -If the admin wishes to control device placement, it will be up to them to make -sure the desired nodes are labeled properly to ensure the Kubernetes scheduler -will distribute the OSD Pods based on Placement criteria. - -In keeping with current Rook-Ceph patterns, the **resources** and **placement** -for the OSDs specified in the `StorageClassDeviceSet` would override any -cluster-wide configurations for OSDs. Additionally, other conflicting -configurations parameters in the CephCluster CRD,such as `useAllDevices`, will -be ignored by device sets. - -### OSD Deployment Behavior - -While the current strategy of deploying OSD Pods as individual Kubernetes -Deployments, some changes to the deployment logic would need to change. The -workflow would look something like this: - -1. Get all matching OSD PVCs -1. Create any missing OSD PVCs -1. Get all matching OSD Deployments -1. Check that all OSD Deployments are using valid OSD PVCs - * If not, probably remove the OSD Deployment? - * Remove any PVCs used by OSD Deployments from the list of PVCs to be - worked on -1. Run an OsdPrepare Job on all unused and uninitialized PVCs - * This would be one Job per PVC -1. Create an OSD Deployment for each unused but initialized PVC - * Deploy OSD with `ceph-volume` if available. - * If PV is not backed by LV, create a LV in this PV. - * If PV is backed by LV, use this PV as is. - -### Additional considerations for local storage - -This design can also be applied to non-cloud environments. To take advantage of -this, the admin should deploy the -[sig-storage-local-static-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) -to create local PVs for the desired local storage devices and then follow the -recommended directions for [local -PVs](https://kubernetes.io/blog/2019/04/04/kubernetes-1.14-local-persistent-volumes-ga/). - -### Possible Implementation: DriveGroups - -Creating real-world OSDs is complex: - -* Some configurations deploy multiple OSDs on a single drive -* Some configurations are using more than one drive for a single OSD. -* Deployments often look similar on multiple hosts. -* There are some advanced configurations possible, like encrypted drives. - -All of these setups are valid real-world configurations that need to be -supported. 
- -The Ceph project defines a data structure that allows defining groups of drives -to be provisioned in a specific way by ceph-volume: Drive Groups. Drive Groups -were originally designed to be ephemeral, but it turns out that orchestrators -like DeepSea store them permanently in order to have a source of truth when -(re-)provisioning OSDs. Also, Drive Groups were originally designed to be host -specific. But the concept of hosts is not really required for the Drive Group -data structure make sense, as they only select a subset of a set of aviailble -drives. - -DeepSea has a documentation of [some example drive -groups](https://github.com/SUSE/DeepSea/wiki/Drive-Groups#example-drive-group-files). -A complete specification is documented in the [ceph -documentation](http://docs.ceph.com/docs/master/mgr/orchestrator_modules/#orchestrator.DriveGroupSpec). - - -#### DeviceSet vs DriveGroup - -A DeviceSet to provision 8 OSDs on 8 drives could look like: - -```yaml -name: "my_device_set" -count: 8 -``` - -The Drive Group would look like so: - -```yaml -host_pattern: "hostname1" -data_devices: - count: 8 -``` - -A Drive Group with 8 OSDs using a shared fast drive could look similar to this: - -```yaml -host_pattern: "hostname1" -data_devices: - count: 8 - model: MC-55-44-XZ -db_devices: - model: NVME -db_slots: 8 -``` -#### ResourceRequirements and Placement - -Drive Groups don't yet provide orchestrator specific extensions, like resource -requirements or placement specs, but that could be added trivially. Also a name -could be added to Drive Groups. - -### OSD Configuration Examples - -Given the complexity of this design, here are a few examples to showcase -possible configurations for OSD `StorageClassDeviceSets`. - -#### Example 1: AWS cross-AZ - -```yaml -type: CephCluster -name: cluster1 -... -spec: - ... - storage: - ... - storageClassDeviceSets: - - name: cluster1-set1 - count: 3 - resources: - requests: - cpu: 2 - memory: 4Gi - placement: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "rook.io/cluster" - operator: In - values: - - cluster1 - topologyKey: "failure-domain.beta.kubernetes.io/zone" - volumeClaimTemplates: - - spec: - resources: - requests: - storage: 5Ti - storageClassName: gp2-ebs -``` - -In this example, `podAntiAffinity` is used to spread the OSD Pods across as many -AZs as possible. In addition, all Pods would have to be given the label -`rook.io/cluster=cluster1` to denote they belong to this cluster, such that the -scheduler will know to try and not schedule multiple Pods with that label on the -same nodes if possible. The CPU and memory requests would allow the scheduler to -know if a given node can support running an OSD process. - -It should be noted, in the case where the only nodes that can run a new OSD Pod -are nodes with OSD Pods already on them, one of those nodes would be -selected. In addition, EBS volumes may not cross between AZs once created, so a -given Pod is guaranteed to always be limited to the AZ it was created in. - -#### Example 2: Single AZ - -```yaml -type: CephCluster -name: cluster1 -... -spec: - ... - resources: - osd: - requests: - cpu: 2 - memory: 4Gi - storage: - ... 
- storageClassDeviceSet: - - name: cluster1-set1 - count: 3 - placement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "failure-domain.beta.kubernetes.io/zone" - operator: In - values: - - us-west-1a - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "rook.io/cluster" - operator: In - values: - - cluster1 - volumeClaimTemplates: - - spec: - resources: - requests: - storage: 5Ti - storageClassName: gp2-ebs - - name: cluster1-set2 - count: 3 - placement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "failure-domain.beta.kubernetes.io/zone" - operator: In - values: - - us-west-1b - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "rook.io/cluster" - operator: In - values: - - cluster1 - volumeClaimTemplates: - - spec: - resources: - requests: - storage: 5Ti - storageClassName: gp2-ebs - - name: cluster1-set3 - count: 3 - placement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "failure-domain.beta.kubernetes.io/zone" - operator: In - values: - - us-west-1c - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "rook.io/cluster" - operator: In - values: - - cluster1 - volumeClaimTemplates: - - spec: - resources: - requests: - storage: 5Ti - storageClassName: gp2-ebs -``` - -In this example, we've added a `nodeAffinity` to the `placement` that restricts -all OSD Pods to a specific AZ. This case is only really useful if you specify -multiple `StorageClassDeviceSets` for different AZs, so that has been done here. -We also specify a top-level `resources` definition, since we want that to be the -same for all OSDs in the device sets. - -#### Example 3: Different resource needs - -```yaml -type: CephCluster -name: cluster1 -... -spec: - ... - storage: - ... - storageClassDeviceSets: - - name: cluster1-set1 - count: 3 - resources: - requests: - cpu: 2 - memory: 4Gi - placement: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "rook.io/cluster" - operator: In - values: - - cluster1 - topologyKey: "failure-domain.beta.kubernetes.io/zone" - volumeClaimTemplates: - - spec: - resources: - requests: - storage: 5Ti - storageClassName: gp2-ebs - - name: cluster1-set2 - count: 3 - resources: - requests: - cpu: 2 - memory: 8Gi - placement: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "rook.io/cluster" - operator: In - values: - - cluster1 - topologyKey: "failure-domain.beta.kubernetes.io/zone" - volumeClaimTemplates: - - spec: - resources: - requests: - storage: 10Ti - storageClassName: gp2-ebs -``` - -In this example, we have two `StorageClassDeviceSets` with different capacities -for the devices in each set. The devices with larger capacities would require a -greater amount of memory to operate the OSD Pods, so that is reflected in the -`resources` field. - -#### Example 4: Simple local storage - -```yaml -type: CephCluster -name: cluster1 -... -spec: - ... - storage: - ... 
- storageClassDeviceSet: - - name: cluster1-set1 - count: 3 - volumeClaimTemplates: - - spec: - resources: - requests: - storage: 1 - storageClassName: cluster1-local-storage -``` - -In this example, we expect there to be nodes that are configured with one local -storage device each, but they would not be specified in the `nodes` list. Prior -to this, the admin would have had to deploy the local-storage-provisioner, -created local PVs for each of the devices, and created a StorageClass to allow -binding of the PVs to PVCs. At this point, the same workflow would be the same -as the cloud use case, where you simply specify a count of devices, and a -template with a StorageClass. Two notes here: - -1. The count of devices would need to match the number of existing devices to -consume them all. -1. The capacity for each device is irrelevant, since we will simply consume the -entire storage device and get that capacity regardless of what is set for the -PVC. - -#### Example 5: Multiple devices per OSD - -```yaml -type: CephCluster -name: cluster1 -... -spec: - ... - storage: - ... - storageClassDeviceSet: - - name: cluster1-set1 - count: 3 - config: - metadataDevice: "/dev/rook/device1" - volumeClaimTemplates: - - metadata: - name: osd-data - spec: - resources: - requests: - storage: 1 - storageClassName: cluster1-hdd-storage - - metadata: - name: osd-metadata - spec: - resources: - requests: - storage: 1 - storageClassName: cluster1-nvme-storage -``` - -In this example, we are using NVMe devices to store OSD metadata while having -HDDs store the actual data. We do this by creating two StorageClasses, one for -the NVMe devices and one for the HDDs. Then, if we assume our implementation -will always provide the block devices in a deterministic manner, we specify the -location of the NVMe devices (as seen in the container) as the `metadataDevice` -in the OSD config. We can guarantee that a given OSD Pod will always select two -devices that are on the same node if we configure `volumeBindingMode: -WaitForFirstConsumer` in the StorageClasses, as that allows us to offload that -logic to the Kubernetes scheduler. Finally, we also provide a `name` field for -each device set, which can be used to identify which set a given PVC belongs to. - -#### Example 6: Additional OSD configuration - -```yaml -type: CephCluster -name: cluster1 -... -spec: - ... - storage: - ... - storageClassDeviceSet: - - name: cluster1-set1 - count: 3 - config: - osdsPerDevice: "3" - volumeClaimTemplates: - - spec: - resources: - requests: - storage: 5Ti - storageClassName: cluster1-local-storage -``` - -In this example, we show how we can provide additional OSD configuration in the -`StorageClassDeviceSet`. The `config` field is just a `map[string]string` type, -so anything can go in this field. diff --git a/design/ceph/stretchcluster.png b/design/ceph/stretchcluster.png deleted file mode 100644 index 21076140d..000000000 Binary files a/design/ceph/stretchcluster.png and /dev/null differ diff --git a/design/ceph/update-osds-in-parallel.md b/design/ceph/update-osds-in-parallel.md deleted file mode 100644 index 2cc4f8cac..000000000 --- a/design/ceph/update-osds-in-parallel.md +++ /dev/null @@ -1,172 +0,0 @@ -# Updating OSDs in parallel -**Targeted for v1.6** - -## Background -In clusters with large numbers of OSDs, it can take a very long time to update all of the OSDs. This -occurs on updates of Rook and Ceph both for major as well as the most minor updates. 
To better
-support large clusters, Rook should be able to update (and upgrade) multiple OSDs in parallel.
-
-
-## High-level requirements
-
-### Ability to set a maximum number of OSDs to be updated simultaneously
-In the worst (but unlikely) case, all OSDs which are updated for a given parallel update operation
-might fail to come back online after they are updated. Users may wish to limit the number of OSDs
-updated in parallel in order to avoid too many OSDs failing in this way.
-
-### Cluster growth takes precedence over updates
-Adding new OSDs to a cluster should occur as quickly as possible. This allows users to make use of
-newly added storage as quickly as possible, which they may need for critical applications using the
-underlying Ceph storage. In some degraded cases, adding new storage may be necessary in order to
-allow currently-running Ceph OSDs to be updated without experiencing storage cluster downtime.
-
-This does not necessarily mean that adding new OSDs needs to happen before updates.
-
-This prioritization might delay updates significantly since adding OSDs not only adds capacity to
-the Ceph cluster but also necessitates data rebalancing. Rebalancing generates data movement which
-needs to settle for updates to be able to proceed.
-
-### OSD updates should not starve other resources of updates
-For Ceph clusters with huge numbers of OSDs, Rook's process to update OSDs should not starve other
-resources of the opportunity to get configuration updates.
-
-
-## Technical implementation details
-
-### Changes to Ceph
-The Ceph manager (mgr) will add functionality to allow querying the maximum number of OSDs that are
-okay to stop safely. The command will take an initial OSD ID to include in the results. It should
-return an error if the initial OSD cannot be stopped safely. Otherwise it returns a list of 1 or more
-OSDs that can be stopped safely in parallel. It should take a `--max=<int>` parameter that limits
-the number of OSDs returned.
-
-It will look similar to this on the command line: `ceph osd ok-to-stop $id --max $int`.
-
-The command will have an internal algorithm that follows the flow below:
-1. Query `ok-to-stop` for the "seed" OSD ID. This represents the CRUSH hierarchy bucket at the "osd"
-   (or "device") level.
-2. If the previous operation reports that it is safe to update, batch query `ok-to-stop` for all OSDs
-   that fall under the CRUSH bucket one level up from the current level.
-3. Repeat step 2, moving up the CRUSH hierarchy, until one of the following two conditions is met:
-   1. The number of OSDs in the batch query is greater than or equal to the `max` parameter, OR
-   2. It is no longer `ok-to-stop` all OSDs in the CRUSH bucket.
-4. Update OSD Deployments in parallel for the last CRUSH bucket where it was `ok-to-stop` the OSDs.
-   - If there are more OSDs in the CRUSH bucket than allowed by the user that are okay to stop,
-     return only the `max` number of OSD IDs from the CRUSH bucket.
-
-The pull request for this feature in the Ceph project can be found at
-https://github.com/ceph/ceph/pull/39455.
-
-### Rook Operator Workflow
-1. Build an "existence list" of OSDs which already have Deployments created for them.
-1. Build an "update queue" of OSD Deployments which need to be updated.
-1. Start OSD prepare Jobs as needed for OSDs on PVC and OSDs on nodes.
-   1. Note which prepare Jobs are started.
-1. Provision Loop
-   1. If all prepare Jobs have been completed and the update queue is empty, stop Provision Loop.
-   1.
If there is a `CephCluster` update/delete, stop Provision Loop with a special error. - 1. Create OSDs: if a prepare Job has completed, read the results. - 1. If any OSDs reported by prepare Job do not exist in the "existence list", create them. - 1. Mark the prepare Job as completed. - 1. Restart Provision Loop. - 1. Update OSDs: if the update queue is not empty, update a batch of OSD Deployments. - 1. Query `ceph osd ok-to-stop --max=` for each OSD in the update queue until - a list of OSD IDs is returned. - - If no OSDs in the update queue are okay to stop, Restart Provision Loop. - 1. Update all of the OSD Deployments in parallel. - 1. Record any failures. - 1. Remove all OSDs from the batch from the update queue (even failures). - 1. Restart Provision Loop. -1. If there are any recorded errors/failures, return with an error. Otherwise return success. - -### How to make sure Ceph cluster updates are not starved by OSD updates -Because [cluster growth takes precedence over updates](#cluster-growth-takes-precedence-over-updates), -it could take a long time for all OSDs in a cluster to be updated. In order for Rook to have -opportunity to reconcile other components of a Ceph cluster's `CephCluster` resource, Rook should -ensure that the OSD update reconciliation does not create a scenario where the `CephCluster` cannot -be modified in other ways. - -https://github.com/rook/rook/pull/6693 introduced a means of interrupting the current OSD -orchestration to handle newer `CephCluster` resource changes. This functionality should remain so -that user changes to the `CephCluster` can begin reconciliation quickly. The Rook Operator should -stop OSD orchestration on any updates to the `CephCluster` spec and be able to resume OSD -orchestration with the next reconcile. - -### How to build the existence list -List all OSD Deployments belonging to the Rook cluster. Build a list of OSD IDs matching the OSD -Deployments. Record this in a data structure that allows O(1) lookup. - -### How to build the update queue -List all OSD Deployments belonging to the Rook cluster to use as the update queue. All OSDs should -be updated in case there are changes to the CephCluster resource that result in OSD deployments -being updated. - -The minimal information each item in the queue needs is only the OSD ID. The OSD Deployment managed -by Rook can easily be inferred from the OSD ID. - -Note: A previous version of this design planned to ignore OSD Deployments which are already updated. -The plan was to identify OSD Deployments which need updated by looking at the OSD Deployments for: -(1) a `rook-version` label that does not match the current version of the Rook operator AND/OR -(2) a `ceph-version` label that does not match the current Ceph version being deployed. This is an -invalid optimization that does not account for OSD Deployments changing due to CephCluster resource -updates. Instead of trying to optimize, it is better to always update OSD Deplyments and rely on the -lower level update calls to finish quickly when there is no update to apply. - -### User configuration - -#### Modifications to `CephCluster` CRD -Establish a new `updatePolicy` section in the `CephCluster` `spec`. In this section, users can -set options for how OSDs should be updated in parallel. Additionally, we can move some existing -one-off configs related to updates to this section for better coherence. This also allows for -a natural location where future update options can be added. 
- -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephCluster -# ... -spec: - # ... - # Move these to the new updatePolicy but keep them here for backwards compatibility. - # These can be marked deprecated, but do not remove them until CephCluster CRD v2. - skipUpgradeChecks: - continueUpgradeAfterChecksEvenIfNotHealthy: - removeOSDsIfOutAndSafeToDestroy: - - # Specify policies related to updating the Ceph cluster and its components. This applies to - # minor updates as well as upgrades. - updatePolicy: - # skipUpgradeChecks is merely relocated from spec - skipUpgradeChecks: - - # continueUpgradeAfterChecksEvenIfNotHealthy is merely relocated from spec - continueUpgradeAfterChecksEvenIfNotHealthy: - - # allow for future additions to updatePolicy like healthErrorsToIgnore - - # Update policy for OSDs. - osds: - # removeIfOutAndSafeToDestroy is merely relocated from spec (removeOSDsIfOutAndSafeToRemove) - removeIfOutAndSafeToDestroy: - - # Max number of OSDs in the cluster to update at once. Rook will try to update this many OSDs - # at once if it is safe to do so. It will update fewer OSDs at once if it would be unsafe to - # update maxInParallelPerCluster at once. This can be a discrete number or a percentage of - # total OSDs in the Ceph cluster. - # Rook defaults to updating 15% of OSDs in the cluster simultaneously if this value is unset. - # Inspired by Kubernetes apps/v1 RollingUpdateDeployment.MaxUnavailable. - # Note: I think we can hide the information about CRUSH from the user since it is not - # necessary for them to understand that complexity. - maxInParallelPerCluster: -``` - -Default `maxInParallelPerCluster`: Ceph defaults to keeping 3 replicas of an item or 2+1 erasure -coding. It should be impossible to update more than one-third (33.3%) of a default Ceph cluster at -any given time. It should be safe and fairly easy to update slightly less than half of one-third at -once, which rounds down to 16%. 15% is a more round number, so that is chosen instead. - - -## Future considerations -Some users may wish to update OSDs in a particular failure domain or zone completely before moving -onto updates in another zone to minimize risk from updates to a single failure domain. This is out -of scope for this initial design, but we should consider how to allow space to more easily implement -this change when it is needed. \ No newline at end of file diff --git a/design/ceph/upgrade.md b/design/ceph/upgrade.md deleted file mode 100644 index c26b32af2..000000000 --- a/design/ceph/upgrade.md +++ /dev/null @@ -1,173 +0,0 @@ -# **Rook Cluster Upgrades** - -## **Overview** -Over time, new versions with improvements to the Rook software will be released and Rook clusters that have already been deployed should be upgraded to the newly released version. -Being able to keep the deployed software current is an important part of managing the deployment and ensuring its health. -In the theme of Rook's orchestration and management capabilities making the life of storage admins easier, this upgrade process should be both automatic and reliable. -This document will describe a proposed design for the upgrading of Rook software as well as pose questions to the community for feedback so that we can deliver on the goal of automatic and reliable upgrades. - -## **Goal** -In order for software upgrade support in Rook to be considered successful, the goals listed below should be met. -Note that these goals are for a long term vision and are not all necessarily deliverable within the v0.6 release time frame. 
-* **Automatic:** When a new version of Rook is released and the admin has chosen to start the upgrade, a live cluster should be able to update all its components to the new version without further user intervention. -* **No downtime:** During an upgrade window, there should be **zero** downtime of cluster functionality. - * The upgrade process should be carried out in a rolling fashion so that not all components are being updated simultaneously. -The cluster should be maintained in a healthy state the entire time. -* **Migrations:** Breaking changes, as well as schema and data format changes should be handled through an automated migration processes. -* **Rollback:** In the event that the upgrade is not successful, the Rook software should be rolled back to the previous version and cluster health should be restored. - -## **User Guide** -Until automated upgrade support is available in Rook, we have authored a user guide that walks you through the steps to upgrade the software in a Rook cluster. -Consideration is also provided in the guide for how to verify the cluster remains healthy during and after the upgrade process. -Please refer to the [Rook Upgrade User Guide](../Documentation/ceph-upgrade.md) to learn more about the current Rook upgrade process. - -## **Detailed Design** -The responsibility for performing and orchestrating an upgrade will be handled by an upgrade controller that runs as part of the Rook operator, in the same pod and process (similar to how the Rook volume provisioner is run). -This controller will be responsible for carrying out the sequence of steps for updating each individual Rook component. -Additionally, the controller will monitor cluster and component health during the upgrade process, taking corrective steps to restore health, up to and including a full rollback to the old version. - -### **Prerequisites** -In order for the upgrade controller to begin an upgrade process, the following conditions must be met: -* The cluster should be in a healthy state in accordance with our defined [health verification checks](#upgrade-health-verification). -The upgrade controller should not begin an upgrade if the cluster is currently unhealthy. -* Metadata for pods must be persistent. If config files and other metadata only resides on an ephemeral empty dir for the pods (i.e., `dataDirHostPath` is not set), then the upgrade controller will not perform an upgrade. - -### **General Sequence** -This section describes in a broad sense the general sequence of steps for upgrading a Rook cluster after a new Rook software version is released, e.g. `v0.6.1`. -Note that this sequence is modeled after the [Rook Upgrade User Guide](../Documentation/ceph-upgrade.md), including the cluster health checks described in the [health verification section](../Documentation/ceph-upgrade.md#health-verification). - -#### **Rook System Namespace** -The Rook system namespace contains the single control plane for all Rook clusters in the environment. -This system namespace should be upgraded first before any individual clusters are upgraded. - -**Operator:** The operator pod itself is upgraded first since it is the host of the upgrade controller. -If there is any new upgrade logic or any migration needed, the new version of the upgrade controller would know how to perform it, so it needs to be updated first. 
-This will be a manual operation by the admin, ensuring that they are ready for their cluster to begin the upgrade process: -```bash -kubectl set image deployment/rook-operator rook-operator=rook/rook:v0.6.1 -``` -This command will update the image field of the operator's pod template, which will then begin the process of the deployment that manages the operator pod to terminate the pod and start a new one running the new version in its place. - -**Agents:** The Rook agents will also be running in the Rook system namespace since they perform operations for all Rook clusters in the environment. -When the operator pod comes up on a newer version than the agents, it will use the Kubernetes API to update the image field of the agent's pod template. -After this update, it will then terminate each agent pod in a rolling fashion so that their managing daemon set will replace them with a new pod on the new version. - -Once the operator and all agent pods are running and healthy on the new version, the administrator is free to begin the upgrade process for each of their Rook clusters. - -#### **Rook Cluster(s)** -1. The Rook operator, at startup after being upgraded, iterates over each Cluster CRD instance and proceeds to verify desired state. - 1. If the Rook system namespace upgrade described above has not yet occurred, then the operator will delay upgrading a cluster until the system upgrade is completed. The operator should never allow a cluster's version to be newer than its own version. -1. The upgrade controller begins a reconciliation to bring the cluster's actual version value in agreement with the desired version, which is the container version of the operator pod. -As each step in this sequence begins/ends, the status field of the cluster CRD will be updated to indicate the progress (current step) of the upgrade process. -This will help the upgrade controller resume the upgrade if it were to be interrupted. -Also, each step should be idempotent so that if the step has already been carried out, there will be no unintended side effects if the step is resumed or run again. -1. **Mons:** The monitor pods will be upgraded in a rolling fashion. **For each** monitor, the following actions will be performed by the upgrade controller: - 1. The `image` field of the pod template spec will be updated to the new version number. - Then the pod will be terminated, allowing the replica set that is managing it to bring up a new pod on the new version to replace it. - 1. The controller will verify that the new pod is on the new version, in the `Running` state, and that the monitor returns to `in quorum` and has a Ceph status of `OK`. - The cluster health will be verified as a whole before moving to the next monitor. -1. **Ceph Managers:** The Ceph manager pod will be upgraded next by updating the `image` field on the pod template spec. -The deployment that is managing the pod will then terminate it and start a new pod running the new version. - 1. The upgrade controller will verify that the new pod is on the new version, in the `Running` state and that the manager instance shows as `Active` in the Ceph status output. -1. **OSDs:** The OSD pods will be upgraded in a rolling fashion after the monitors. **For each** OSD, the following actions will take place: - 1. The `image` field of the pod template spec will be updated to the new version number. - 1. The lifecycle management of OSDs can be done either as a whole by a single daemon set or individually by a replica set per OSD. 
- In either case, each individual OSD pod will be terminated so that its managing controller will respawn a new pod on the new version in its place. - 1. The controller will verify that each OSD is running the new version and that they return to the `UP` and `IN` statuses. - Placement group health will also be verified to ensure all PGs return to the `active+clean` status before moving on. -1. If the user has installed optional components, such as object storage (**RGW**) or shared file system (**MDS**), they will also be upgraded to the new version. -They are both managed by deployments, so the upgrade controller will update the `image` field in their pod template specs which then causes the deployment to terminate old pods and start up new pods on the new versions to replace them. - 1. Cluster health and object/file functionality will be verified before the upgrade controller moves on to the next instances. - -### **Upgrade Health Verification** -As mentioned previously, the manual health verification steps found in the [upgrade user guide](../Documentation/ceph-upgrade.md#health-verification) will be used by the upgrade controller, in an automated fashion, to ensure the cluster is healthy before proceeding with the upgrade process. -This approach of upgrading one component, verifying health and stability, then upgrading the next component can be viewed as a form of [canary deployment](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#canary-deployments). - -Here is a quick summary of the standard health checks the upgrade controller should perform: -* All pods are in the `Running` state and have few, if any, restarts - * No pods enter a crash loop backoff -* Overall status: The overall cluster status is `OK` and there are no warning or error status messages displayed. -* Monitors: All of the monitors are `in quorum` and have individual status of `OK`. -* OSDs: All OSDs are `UP` and `IN`. -* MGRs: All Ceph managers are in the `Active` state. -* Placement groups: All PGs are in the `active+clean` state. - -#### Pod Readiness/Liveness Probes -To further supplement the upgrade controller's ability to determine health, as well as facilitate the built-in Kubernetes upgrade capabilities, the Rook pods should implement [liveness and readiness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) when possible. -For pods that implement these probes, the upgrade controller can check them as another data point in determining if things are healthy before proceeding with the upgrade. - -### **Rollback** -If the upgrade controller observes the cluster to be in an unhealthy state (that does not recover) during the upgrade process, it will need to roll back components in the cluster to the previous stable version. -This is possible due to the rolling/canary approach of the upgrade controller. -To roll a component back to the previous version, the controller will simply set the `image` field of the pod template spec to the previous version then terminate each pod to allow their managing controller to start a new pod on the *old* version to replace it. - -The hope is that cluster health and stability will be restored once it has been rolled back to the previous version, but it is possible that simply rolling back the version may not solve all cases of cluster instability that begin during an upgrade process. 
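-
-For example, rolling the operator itself back might look like the following sketch, which assumes the previously deployed release was `v0.6.0`; other components would be reverted the same way, by setting the `image` field of their pod template back to the old tag:
-
-```bash
-# Hypothetical example: revert the operator Deployment to the prior release tag.
-kubectl set image deployment/rook-operator rook-operator=rook/rook:v0.6.0
-```
-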
-We will need more hands on experience with cluster upgrades in order to improve both upgrade reliability and rollback effectiveness. - -### **Upgrade Tools** -We should consider implementing status commands that will help the user monitor and verify the upgrade progress and status. -Some examples for potential new commands would be: -* `rook versions`: This command would return the version of all Rook components in the cluster, so they can see at a glance which components have finished upgrading. -This is similar to the [`ceph versions` command](http://ceph.com/community/new-luminous-upgrade-complete/). -* `rook status --upgrade`: This command would return a summary, retrieved from the upgrade controller, of the most recent completed steps and status of the upgrade that it is currently working on. - -### **Migrations and Breaking Changes** -When a breaking change or a data format change occurs, the upgrade controller will have the ability to automatically perform the necessary migration steps during the upgrade process. -While migrations are possible, they are certainly not desirable since they require extra upgrade logic to be written and tested, as well as providing new potential paths for failure. -Going forward, it will be important for the Rook project to increase our discipline regarding the introduction of breaking changes. -We should be **very** careful about adding any new code that requires a migration during the update process. - -### **Kubernetes Built-in Support** -Kubernetes has some [built-in support for rolling updates](https://kubernetes.io/docs/tasks/run-application/rolling-update-replication-controller/) with the `kubectl rolling-update` command. -Rook can potentially take advantage of this support for our replication controllers that have multiple stateless pods deployed, such as RGW. -This support is likely not a good fit for some of the more critical and sensitive components in the cluster, such as monitors, that require careful observation to ensure health is maintained and quorum is reestablished. - -If the upgrade controller uses the built-in rolling update support for certain stateless components, it should still verify all cluster health checks before proceeding with the next set of components. - -### **Synchronization** -The upgrade process should be carefully orchestrated in a controlled manner to ensure reliability and success. -Therefore, there should be some locking or synchronization that can ensure that while an upgrade is in progress, other changes to the cluster cannot be made. -For example, if the upgrade controller is currently rolling out a new version, it should not be possible to modify the cluster CRD with other changes, such as removing a node from the cluster. -This could be done by the operator stopping its watches on all CRDs or it could choose to simply return immediately from CRD events while the upgrade is in progress. - -There are also some mechanisms within Ceph that can help the upgrade proceed in a controlled manner. -For example, the `noout` flag can be set in the Ceph cluster, indicating that while OSDs will be taken down to upgrade them, they should not be marked out of the cluster, which would trigger unnecessary recovery operations. -The [Ceph Luminous upgrade guide](http://ceph.com/releases/v12-1-4-luminous-rc-released/#upgrading) recommends setting the `noout` flag for the duration of the upgrade. 
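-
-For example, the flag can be set before the OSD updates begin and cleared once they complete (a sketch assuming the commands are run from the Rook toolbox, or any host with a working `ceph` CLI and admin credentials):
-
-```bash
-# Prevent OSDs from being marked out while they are restarted during the upgrade.
-ceph osd set noout
-
-# ... perform the rolling update of the OSD pods ...
-
-# Once all OSDs are back UP and IN, restore normal out handling.
-ceph osd unset noout
-```
-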
-Details of the `noout` flag can be found in the [Ceph documentation](http://docs.ceph.com/docs/giant/rados/troubleshooting/troubleshooting-osd/#stopping-w-out-rebalancing). - -### **Scalability** -For small clusters, the process of upgrading one pod at a time should be sufficient. -However, for much later clusters (100+ nodes), this would result in an unacceptably long upgrade window duration. -The upgrade controller should be able to batch some of its efforts to upgrade multiple pods at once in order to finish an upgrade in a more timely manner. - -This batching should not be done across component types (e.g. upgrading mons and OSDs at the same time), those boundaries where the health of the entire cluster is verified should still exist. -This batching should also not be done for monitors as there are typically only a handful of monitors servicing the entire cluster and it is not recommended to have multiple monitors down at the same time. - -But, the upgrade controller should be able to slowly increase its component update batch size as it proceeds through some other component types, such as OSDs, MDS and RGW. -For example, in true canary deployment fashion, a single OSD could be upgraded to the new version and OSD/cluster health will be verified. -Then two OSDs could be updated at once and verification occurs again, followed by four OSDs, etc. up to a reasonable upper bound. -We do not want too many pods going down at one time, which could potentially impact cluster health and functionality, so a sane upper bound will be important. - -### **Troubleshooting** -If an upgrade does not succeed, especially if the rollback effort also fails, we want to have some artifacts that are accessible by the storage administrator to troubleshoot the issue or to reach out to the Rook community for help. -Because the upgrade process involves terminating pods and starting new ones, we need some strategies for investigating what happened to pods that may no longer be alive. -Listed below are a few techniques for accessing debugging artifacts from pods that are no longer running: -* `kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME}` allows you to retrieve logs from a previous instance of a pod (e.g. a pod that crashed but is not yet terminated) -* `kubectl get pods --show-all=true` will list all pods, including older versioned pods that were terminated in order to replace them with pods running the newer version. -* The Rook operator logs (which host the upgrade controller output) should be thorough and verbose about the following: - * The sequence of actions it took during the upgrade - * The replication controllers (e.g. daemon set, replica set, deployment) that it modified and the pod names that it terminated - * All health check status and output it encountered - -## **Next Steps** -We have demonstrated that Rook is upgradable with the manual process outlined in the [Rook Upgrade User Guide](../Documentation/ceph-upgrade.md). -Fully automated upgrade support has been described within this design proposal, but will likely need to be implemented in an iterative process, with lessons learned along the way from pre-production field experience. - -The next step will be to implement the happy path where the upgrade controller automatically updates all Rook components in the [described sequence](#general-sequence) and stops immediately if any health checks fail and the cluster does not return to a healthy functional state. 
- -Handling failure cases with rollback as well as handling migrations and breaking changes will likely be implemented in future milestones, along with reliability and stability improvements from field and testing experience. - -## **Open Questions** -1. What other steps can be taken to restore cluster health before resorting to rollback? -1. What do we do if rollback doesn't succeed? -1. What meaningful liveness/readiness probes can our pods implement? diff --git a/design/common/multiple-storage-types-support.md b/design/common/multiple-storage-types-support.md index 51ec8c841..ac2dc806f 100644 --- a/design/common/multiple-storage-types-support.md +++ b/design/common/multiple-storage-types-support.md @@ -175,7 +175,7 @@ Our `golang` strongly typed definitions would look like the following, where the `types.go`: ```go -package v1alpha1 // "github.com/rook/rook/pkg/apis/ceph.rook.io/v1alpha1" +package v1alpha1 // "github.com/rook/cassandra/pkg/apis/ceph.rook.io/v1alpha1" import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -313,9 +313,9 @@ Other backend operators could do a similar thing for their node/device level con As previously mentioned, the `rook.io` API group will also define some other useful `*Spec` types: -* `PlacementSpec`: Defines placement requirements for components of the storage provider, such as node and pod affinity. This is similar to the existing [Ceph focused `PlacementSpec`](https://github.com/rook/rook/blob/release-0.7/pkg/apis/rook.io/v1alpha1/types.go#L141), but in a generic way that is reusable by all storage providers. A `PlacementSpec` will essentially be a map of placement information structs that are indexed by component name. +* `PlacementSpec`: Defines placement requirements for components of the storage provider, such as node and pod affinity. This is similar to the existing [Ceph focused `PlacementSpec`](https://github.com/rook/cassandra/blob/release-0.7/pkg/apis/rook.io/v1alpha1/types.go#L141), but in a generic way that is reusable by all storage providers. A `PlacementSpec` will essentially be a map of placement information structs that are indexed by component name. * `NetworkSpec`: Defines the network configuration for the storage provider, such as `hostNetwork`. -* `ResourceSpec`: Defines the resource usage of the provider, allowing limits on CPU and memory, similar to the existing [Ceph focused `ResourceSpec`](https://github.com/rook/rook/blob/release-0.7/pkg/apis/rook.io/v1alpha1/types.go#L85). +* `ResourceSpec`: Defines the resource usage of the provider, allowing limits on CPU and memory, similar to the existing [Ceph focused `ResourceSpec`](https://github.com/rook/cassandra/blob/release-0.7/pkg/apis/rook.io/v1alpha1/types.go#L85). #### Additional Types diff --git a/design/nfs/nfs-controller-runtime.md b/design/nfs/nfs-controller-runtime.md deleted file mode 100644 index d65ef1011..000000000 --- a/design/nfs/nfs-controller-runtime.md +++ /dev/null @@ -1,181 +0,0 @@ -# Implement controller-runtime in Rook NFS Operator - -## Background - -This proposal is to implement controller-runtime in Rook NFS Operator to improve reliability of the operator itself. Currently, Rook nfs-operator only simply watches an event of CustomResource from an informer using simple [WatchCR][rook-watchcr] method which has limited functionality such as event can not be re-queued if failed. To implement controller-runtime is expected to overcome the shortcomings of current implementation. - -## Why controller-runtime? 
-
-[Controller-runtime][controller-runtime] is widely used for writing Kubernetes operators. It is also leveraged by Kubebuilder and Operator SDK. Controller-runtime consists of several packages that have their respective responsibilities in building operators. The main functions of controller-runtime are:
-
-- **Manager:** Runnable for the operator with a leader election option. It also provides shared dependencies such as clients, caches, schemes, etc.
-- **Controller:** Provides types and functions for building Controllers, which ensure that for any given object the actual state matches the desired state; this is called the `Reconciling` process.
-- **Admission Webhook:** Provides methods to build an admission webhook (both Mutating Admission Webhook and Validating Admission Webhook) and bootstrap a webhook server.
-- **Envtest:** Provides libraries for integration testing by starting a local control plane (etcd and kube-apiserver).
-- **Metrics:** Provides metrics utilities for controllers.
-
-## Implementation
-
-The implementation of this proposal is to rewrite the NFS Operator controller to use controller-runtime and to introduce a validating admission webhook for the NFS Operator using controller-runtime.
-
-### Controller & Reconciliation
-
-Operators are Kubernetes extensions that use custom resources to manage applications and their components using the Kubernetes APIs and kubectl tooling. Operators follow the Kubernetes controller principles. The process by which the actual state of an object (both cluster objects and external objects) is brought to match the desired state is called the *Reconciliation* process in controller-runtime.
-
-In the current implementation, the operator watches CustomResource events (create, update and delete), which are handled by functions registered in `ResourceEventHandlerFuncs`; every event has its own handler, but only the create handler is implemented.
-
-Controller-runtime introduces an interface called [Reconciler][Controller-runtime-reconciler] that will ensure the state of the system matches what is specified by the user in the object at the time the Reconciler is called. A Reconciler responds to generic events, so it will contain all of the business logic of a Controller (create, update, and delete). All we have to do here is implement the [Reconcile][Controller-runtime-reconcile] method of the interface in the controller. Controller-runtime also has a utility function for creating and updating an object, [CreateOrUpdate][controller-runtime-createorupdate], which makes it easier to handle updates of an object.
-
-Since implementing the controller using controller-runtime only changes the logic of the controller, the deployment process will be the same as the current implementation. However, deploying the admission webhook built with controller-runtime will require additional steps, as explained below.
-
-### Validation
-
-CustomResource validation in the operator can be done through the Controller itself. However, the operator pattern has two common ways to validate the CustomResource:
-
-- **Syntactic validation:** by defining OpenAPI validation rules.
-- **Semantic validation:** by creating a ValidatingAdmissionConfiguration and an Admission Webhook.
-
-The current implementation only validates the CustomResource in the controller and just gives an error log in the operator stdout if the given resource is invalid.
In this implementation will also cover the CustomResouce validation both though *Syntactic validation* and *Semantic Validation* and also give an improvement validation in the controller. - -![validation-webhook-flow](../../Documentation/media/nfs-webhook-validation-flow.png "Validation Webhook Flow") - -To implement *Syntactic validation* is only by defining OpenAPI validation rules. Otherwise, the *Semantic Validation* implementation is a bit more complicated. Fortunately, controller-runtime provides an awesome package that helpfully to create admission webhook such as bootstraping webhook server, registering handler, etc. Just like controller that have [Reconciler][controller-runtime-reconciler] interface, admission webhook in controller-runtime also have [Validator][controller-runtime-validator] interface that handle the operations validation. - -> Controller-runtime also provide [Defaulter][controller-runtime-defaulter] interface to handle mutation webhook. - -Since the webhook server must be served through TLS, a valid TLS certificate will be required. In this case, we can depend on [cert-manager][cert-manager]. The cert-manager component can be deployed as usual [cert-manager-installation](cert-manager-installation) no matter which namespace the cert-manager component lives. But keep in mind that *Certificate* must be in the same namespace as webhook-server. - -![validation-webhook-deployment](../../Documentation/media/nfs-webhook-deployment.png "Validation Webhook Deployment") - -Example self signed certificate. - -```yaml ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Certificate -metadata: - name: rook-nfs-webhook-cert - namespace: rook-nfs-system -spec: - dnsNames: - - rook-nfs-webhook.rook-nfs-system.svc - - rook-nfs-webhook.rook-nfs-system.svc.cluster.local - issuerRef: - kind: Issuer - name: rook-nfs-selfsigned-issuer - secretName: rook-nfs-webhook-cert ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Issuer -metadata: - name: rook-nfs-selfsigned-issuer - namespace: rook-nfs-system -spec: - selfSigned: {} -``` - -And the ValidatingAdmissionConfiguration will look like - -```yaml ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - cert-manager.io/inject-ca-from: rook-nfs-system/rook-nfs-webhook-cert - creationTimestamp: null - name: rook-nfs-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: rook-nfs-webhook - namespace: rook-nfs-system - path: /validate-nfs-rook-io-v1alpha1-nfsserver - failurePolicy: Fail - name: validation.nfsserver.nfs.rook.io - rules: - - apiGroups: - - nfs.rook.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - nfsservers -``` - -By providing [cert-manager.io/inject-ca-from][cert-manager-cainjector] annotation, `cert-manager` will replace `.clientConfig.caBundle` with appropriate certificate. When constructing controller-runtime using [Builder][controller-runtime-webhook-builder] controller-runtime will serving the validation handler on `/validate-group-version-kind` and mutation handler on `/mutate-group-version-kind`. So `.clientConfig.service.path` must be have correct value. And the implementation is the admission webhook server will be deployed independently. The `Semantic Validation` will be optional and users can enable or disable this validation by deploying the admission webhook configuration and server or not. The example manifests to deploy the admission webhook server will look like this. 
- -```yaml ---- -kind: Service -apiVersion: v1 -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system -spec: - selector: - app: rook-nfs-webhook - ports: - - port: 443 - targetPort: webhook-server ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system - labels: - app: rook-nfs-webhook -spec: - replicas: 1 - selector: - matchLabels: - app: rook-nfs-webhook - template: - metadata: - labels: - app: rook-nfs-webhook - spec: - containers: - - name: rook-nfs-webhook - image: rook/nfs:master - imagePullPolicy: IfNotPresent - args: ["nfs", "webhook"] - ports: - - containerPort: 9443 - name: webhook-server - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: rook-nfs-webhook-cert -``` - -Since *Semantic Validation* will be optional, validating CustomResource in the controller should still there. The improvement that will be introduced is if a given resource is invalid it should be given information in the CustomResouce status subresource. - -## References - -1. https://book.kubebuilder.io/cronjob-tutorial/controller-overview.html -1. https://pkg.go.dev/sigs.k8s.io/controller-runtime -1. https://kubernetes.io/docs/concepts/extend-kubernetes/extend-cluster/ -1. https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ -1. https://www.openshift.com/blog/kubernetes-operators-best-practices - -[rook-watchcr]: https://github.com/rook/rook/blob/release-1.3/pkg/operator/k8sutil/customresource.go#L48 -[cert-manager]: https://cert-manager.io/ -[cert-manager-installation]: https://cert-manager.io/docs/installation/ -[cert-manager-cainjector]: https://cert-manager.io/docs/concepts/ca-injector/ -[controller-runtime]: https://github.com/kubernetes-sigs/controller-runtime -[controller-runtime-createorupdate]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil#CreateOrUpdate -[controller-runtime-reconcile]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/reconcile#Func.Reconcile -[controller-runtime-reconciler]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler -[controller-runtime-defaulter]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/webhook/admission#Defaulter -[controller-runtime-validator]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/webhook/admission#Validator -[controller-runtime-webhook-builder]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/builder#WebhookBuilder \ No newline at end of file diff --git a/design/nfs/nfs-provisioner-controlled-by-operator.md b/design/nfs/nfs-provisioner-controlled-by-operator.md deleted file mode 100644 index 375d242fe..000000000 --- a/design/nfs/nfs-provisioner-controlled-by-operator.md +++ /dev/null @@ -1,115 +0,0 @@ -# NFS Provisioner Controlled by Operator - -## Summary - -NFS Provisioner is a built in dynamic provisioner for Rook NFS. The functionality works fine but has an issue where the provisioner uses the same underlying directory for each provisioned PV when provisioning two or more PV in the same share/export. This overlap means that each provisioned PV for a share/export can read/write each others data. 
- -This hierarchy is the current behaviour of NFS Provisioner when provisioning two PV in the same share/export: - -```text -export -├── sample-export -|   ├── data (from PV-A) -|   ├── data (from PV-B) -|   ├── data (from PV-A) -|   └── data (from PV-A) -└── another-export -``` - -Both PV-A and PV-B uses the `sample-export` directory as their data location. - -This proposal is to make Rook NFS Provisioner create a sub-directory for every provisioned PV in the same share/export. So it will have a hierarchy like: - -```text -export -├── sample-export -│   ├── pv-a -│   │   ├── data (from PV-A) -│   │   ├── data (from PV-A) -│   │   └── data (from PV-A) -│   └── pv-b -│      └── data (from PV-B) -└── another-export -``` - -Since those directories are not in the NFS Provisioner pod but in the NFS Server pod, NFS Provisioner cannot directly create sub-directories for them. The solution is to mount the whole underlying NFS share/export directory so that the NFS Provisioner can create a sub-directory for each provisioned PV. - -### Original Issue - -- https://github.com/rook/rook/issues/4982 - -### Goals - -- Make NFS Provisioner to create sub-directory for each provisioned PVs. -- Make NFS Provisioner use the sub-directory for each provisioned PV instead of using underlying directory. -- Improve reliability of NFS Provisioner. - -### Non-Goals - -- NFS Operator manipulates uncontrolled resources. - -## Proposal details - -The approach will be similar to [Kubernetes NFS Client Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client), where the provisioner mounts the whole of NFS share/export into the provisioner pod (by kubelet), so that the provisioner can then create the appropriate sub-directory for each provisioned PV. Currently Rook NFS Provisioner is deployed independently and before the NFS Server itself, so we cannot mount the NFS share because we don't know the NFS Server IP or the share/export directory. - -The idea is to make NFS Provisioner controlled by the operator. So when an NFS Server is created, the operator also then creates its provisioner, which mounts each NFS share/export. Then, the NFS Provisioner can create a sub-directory for each provisioned PV. - -This is the example NFS Server - -```yaml -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - ... - persistentVolumeClaim: - claimName: nfs-default-claim - - name: share2 - ... - persistentVolumeClaim: - claimName: nfs-another-claim -``` - -And the operator will creates the provisioner deployment like - -```yaml -kind: Deployment -apiVersion: apps/v1 -metadata: - name: rook-nfs-provisioner - namespace: rook-nfs -spec: - ... - spec: - .... - containers: - - name: rook-nfs-provisioner - image: rook/nfs:master - args: ["nfs", "provisioner","--provisioner=nfs.rook.io/nfs-server-provisioner"] - volumes: - - name: share1 - nfs: - server: - path: /export/nfs-default-claim - - name: share2 - nfs: - server: - path: /export/nfs-another-claim -``` - -The provisioner deployment will be created in the same namespace as the NFS server and with the same privileges. Since the provisioner is automatically created by the operator, the provisioner deployment name and provisioner name flag (`--provisioner`) value will depend on NFSServer name. The provisioner deployment name will have an added suffix of `-provisioner` and the provisioner name will start with `nfs.rook.io/`. 
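As a concrete (hypothetical) example of the naming rules above: for the `NFSServer` named `rook-nfs` shown earlier, the operator-managed provisioner Deployment would be named `rook-nfs-provisioner`, and the derived provisioner string would live under the `nfs.rook.io/` prefix. A minimal sketch, with the exact provisioner value assumed for illustration:

```yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  # NFSServer name ("rook-nfs") plus the "-provisioner" suffix
  name: rook-nfs-provisioner
  # created in the same namespace as the NFSServer
  namespace: rook-nfs
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-nfs-provisioner
  template:
    metadata:
      labels:
        app: rook-nfs-provisioner
    spec:
      containers:
      - name: rook-nfs-provisioner
        image: rook/nfs:master
        # provisioner name derived from the NFSServer name, under the nfs.rook.io/ prefix (assumed value)
        args: ["nfs", "provisioner", "--provisioner=nfs.rook.io/rook-nfs"]
```

The NFS share volumes and volume mounts shown in the earlier example are omitted here for brevity; only the naming is being illustrated.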
- -## Alternatives - -The other possible approach is NFS Provisioner mounts the NFS Server share manually (by executing `mount` command) before creating an appropriate directory for each PV. But in my humble opinion, NFS Provisioner would be lacking reliability under several conditions like NFSServer getting its exports updated, the cluster has two or more NFSServer, etc. - -## Glossary - -**Provisioned PV:** Persistent Volumes which provisioned by rook nfs provisioner through Storage Class and Persistent Volumes Claims. - -**NFS share/export:** A directory in NFS Server which exported using nfs protocol. diff --git a/design/nfs/nfs-quota.md b/design/nfs/nfs-quota.md deleted file mode 100644 index 357f54a27..000000000 --- a/design/nfs/nfs-quota.md +++ /dev/null @@ -1,122 +0,0 @@ -# NFS Quota - -## Background - -Currently, when the user creates NFS PersistentVolumes from an NFS Rook share/export via PersistentVolumeClaim, the provisioner does not provide the specific capacity as requested. For example the users create NFS PersistentVolumes via PersistentVolumeClaim as following: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rook-nfs-pv-claim -spec: - storageClassName: "rook-nfs-share" - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi -``` - -The client still can use the higher capacity than `1mi` as requested. - -This proposal is to add features which the Rook NFS Provisioner will provide the specific capacity as requested from `.spec.resources.requests.storage` field in PersistentVolumeClaim. - -## Implementation - -The implementation will be use `Project Quota` on xfs filesystem. When the users need to use the quota feature they should use xfs filesystem with `prjquota/pquota` mount options for underlying volume. Users can specify filesystem type and mount options through StorageClass that will be used for underlying volume. For example: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: standard-xfs -parameters: - fsType: xfs -mountOptions: - - prjquota -... -``` - -> Note: Many distributed storage providers for Kubernetes support xfs filesystem. Typically by defining `fsType: xfs` or `fs: xfs` (depend on storage providers) in storageClass parameters. for more detail about specify filesystem type please see https://kubernetes.io/docs/concepts/storage/storage-classes/ - -Then the underlying PersistentVolumeClaim should be using that StorageClass - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-default-claim -spec: - storageClassName: "standard-xfs" - accessModes: - - ReadWriteOnce -... -``` - -If the above conditions are met then the Rook NFS Provisioner will create projects and set the quota limit using [xfs_quota](https://linux.die.net/man/8/xfs_quota) before creating PersistentVolumes based on `.spec.resources.requests.storage` field in PersistentVolumeClaim. Otherwise the Rook NFS Provisioner will provision a PersistentVolumes without creating setting the quota. - -To creating the project, Rook NFS Provisioner will invoke the following command - -> xfs_quota -x -c project -s -p '*nfs_pv_directory* *project_id*' *projects_file* - -And setting quota with the command - -> xfs_quota -x -c 'limit -p bhard=*size* *project_id*' *projects_file* - -which - -1. *nfs_pv_directory* is sub-directory from exported directory that used for NFS PV. -1. *project_id* is unique id `uint16` 1 to 65535. -1. *size* is size of quota as requested. -1. 
*projects_file* is a file that contains *project quota block* entries, used to persist quota state. If the Rook NFS Provisioner pod is killed, it will restore the quota state from the *project quota block* entries in *projects_file* at startup.
1. A *project quota block* is a combination of *project_id*:*nfs_pv_directory*:*size*.

Since Rook NFS can create more than one NFS share/export, each with a different underlying volume directory, a *projects_file* will be saved in each underlying volume directory. Each NFS share/export therefore has its own *projects_file*, and each *projects_file* is persisted. The *projects_file* will only be created if the underlying volume directory is mounted as `xfs` with the `prjquota` mount option, so the existence of a *projects_file* indicates whether quota is enabled. The directory hierarchy will look like:

```text
/
├── underlying-volume-A (export A) (mounted as xfs with prjquota mount options)
│   ├── projects_file
│   ├── nfs-pv-a (PV-A) (which quota created for)
│   │   ├── data (from PV-A)
│   └── nfs-pv-b (PV-B) (which quota created for)
│       └── data (from PV-B)
├── underlying-volume-B (export B) (mounted as xfs with prjquota mount options)
│   ├── projects_file
│   └── nfs-pv-c (PV-C) (which quota created for)
└── underlying-volume-C (export C) (not mounted as xfs)
    └── nfs-pv-d (PV-D) (quota not created)
```

The hierarchy above is an example where Rook NFS has 3 NFS share/exports (A, B and C). The *projects_file* inside underlying-volume-A will contain *project quota block* entries like

```
1:/underlying-volume-A/nfs-pv-a:size
2:/underlying-volume-A/nfs-pv-b:size
```

The *projects_file* inside underlying-volume-B will look like

```
1:/underlying-volume-B/nfs-pv-c:size
```

underlying-volume-C does not have a *projects_file* because it is not mounted as an xfs filesystem.

### Updating container image

Since the `xfs_quota` binary is not installed by default, we need to update the Rook NFS container image to install the `xfsprogs` package.

### Why XFS

Most Kubernetes VolumeSources use the ext4 filesystem type by default if `fsType` is unspecified. Ext4 also has a project quota feature starting in [Linux kernel 4.4](https://lwn.net/Articles/671627/). But unlike xfs, which supports project quota natively, mounting ext4 with the prjquota option requires additional steps, such as enabling project quota through [tune2fs](https://linux.die.net/man/8/tune2fs) before the filesystem is mounted, and some Linux distributions need an additional kernel module for quota management. So for now we will only support the xfs filesystem when users need the quota feature in Rook NFS, and we might expand to ext4 as well if possible.

## References

1. https://kubernetes.io/docs/concepts/storage/volumes/
1. https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/
1. https://linux.die.net/man/8/xfs_quota
1. https://lwn.net/Articles/671627/
1. https://linux.die.net/man/8/tune2fs
1. https://www.digitalocean.com/community/tutorials/how-to-set-filesystem-quotas-on-ubuntu-18-04#step-2-%E2%80%93-installing-the-quota-kernel-module
diff --git a/design/nfs/nfs.md b/design/nfs/nfs.md
deleted file mode 100644
index 3e035383d..000000000
--- a/design/nfs/nfs.md
+++ /dev/null
@@ -1,290 +0,0 @@
# Add NFS to Rook

## Overview

This document explores a design to add NFS to Rook. This is a part of the rook feature request [#1551](https://github.com/rook/rook/issues/1551).
- -## Rook Architecture - -Rook turns distributed storage software into a self-managing, self-scaling, and self-healing storage services. It does this by automating deployment, bootstrapping, configuration, provisioning, scaling, upgrading, migration, disaster recovery, monitoring, and resource management. Rook uses the facilities provided by the underlying cloud-native container management, scheduling and orchestration platform to perform its duties. -![Rook Architecture on Kubernetes](../../Documentation/media/rook-architecture.png) - -## Network File System (NFS) - -NFS allows remote hosts to mount file systems over a network and interact with those file systems as though they are mounted locally. This enables system administrators to consolidate resources onto centralized servers on the network. - -## Why NFS? - -NFS is widely used for persistent storage in kubernetes cluster. Using NFS storage is a convenient and easy way to provision storage for applications. -An NFS volume allows an existing NFS (Network File System) share to be mounted into the pod. -The contents of an NFS volume are preserved and the volume is merely unmounted if the pod is stopped/destroyed. This means that an NFS volume can be pre-populated with data, and that data can be “handed off” between pods. -NFS supports multiple read/write simultaneously so a single share can be attached to multiple pods. - -## Design -With this design Rook is exploring to providing another widely adopted storage option for admins and users of cloud-native environments. This design tends to automate NFS starting from its configuration (such as allowed hosts, read/write permissions etc.) to deployment and provisioning. The operations on NFS which cannot be done natively by Kubernetes will be automated. -NFS doesn’t provide an internal provisioner for kubernetes, so Rook is needed as an external provisioner. -This design uses NFS-Ganesha server and NFS v4. - -### Initial Setup - -The flow of creating NFS backed storage in Rook is -1. The settings are determined and saved in an NFS server CRD (rook-nfs.yaml) -2. `kubectl create -f rook-nfs.yaml` -3. When the NFS CRD instance is created, Rook responds to this request by starting the NFS daemon with the required configuration and exports stated in the CRD and creates a service to expose NFS. -4. NFS volume is ready to be consumed by other pods through a PVC. - -### NFS CRD - -The NFS CRD spec will specify the following: -1. NFS server storage backend configuration. E.g., configuration for various storage backends(ceph, ebs, azure disk etc) that will be shared using NFS. -2. NFS server configuration - The following points are required for configuring NFS server: - - export (The volume being exported) - - client (The host or network to which the export is being shared) - - client options (The options to be used for the client) e.g., read and write permission, root squash etc. - -The parameters to configure NFS CRD are demonstrated in the example below which is followed by a table that explains the parameters: - -A simple example for sharing a volume(could be hostPath, cephFS, cephRBD, googlePD, EBS etc.) 
using NFS, without client specification and per export based configuration, whose NFS-Ganesha export entry looks like: -``` -EXPORT { - Export_Id = 1; - Path = /export; - Pseudo = /nfs-share; - Protocols = 4; - Sectype = sys; - Access_Type = RW; - Squash = none; - FSAL { - Name = VFS; - } -} -``` -the CRD instance will look like the following: -```yaml -apiVersion: rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: nfs-share - server: - accessMode: ReadWrite - squash: root - persistentVolumeClaim: - claimName: googlePD-claim -``` -The table explains each parameter - -| Parameter | Description | Default | -| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | -| `replicas` | The no. of NFS daemon to start | `1` | -| `exports` | Parameters for creating an export | | -| `exports.name` | Name of the volume being shared | | -| `exports.server` | NFS server configuration | | -| `exports.server.accessMode` | Volume access modes(Reading and Writing) for the share | `ReadOnly` | -| `exports.server.squash` | This prevents root users connected remotely from having root privileges | `root` | -| `exports.server.allowedClients` | Access configuration for clients that can consume the NFS volume | | -| `exports.server.allowedClients.name` | Name of the host/hosts | | -| `exports.server.allowedClients.clients` | The host or network to which export is being shared.(could be hostname, ip address, netgroup, CIDR network address, or all) | | -| `exports.server.allowedClients.accessMode` | Reading and Writing permissions for the client* | `ReadOnly` | -| `exports.server.allowedClients.squash` | Squash option for the client* | `root` | -| `exports.persistentVolumeClaim` | Claim to get volume(Volume could come from hostPath, cephFS, cephRBD, googlePD, EBS etc. and these volumes will be exposed by NFS server ). | | -| `exports.persistentVolumeClaim.claimName` | Name of the PVC | | - -*note: if `exports.server.accessMode` and `exports.server.squash` options are mentioned, `exports.server.allowedClients.accessMode` and `exports.server.allowedClients.squash` are overridden respectively. - -Available options for `volumes.allowedClients.accessMode` are: -1. ReadOnly -2. ReadWrite -3. none - -Available options for `volumes.allowedClients.squash` are: -1. none (No user id squashing is performed) -2. rootId (uid 0 and gid 0 are squashed to the anonymous uid and anonymous gid) -3. root (uid 0 and gid of any value are squashed to the anonymous uid and anonymous gid) -4. all (All users are squashed) - -The volume that needs to be exported by NFS must be attached to NFS server pod via PVC. Examples of volume that can be attached are Host Path, AWS Elastic Block Store, GCE Persistent Disk, CephFS, RBD etc. The limitations of these volumes also apply while they are shared by NFS. The limitation and other details about these volumes can be found [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). - -### Examples - -Here are some examples for advanced configuration: - -1. For sharing a volume(could be hostPath, cephFS, cephRBD, googlePD, EBS etc.) 
using NFS, which will be shared as /nfs-share by the NFS server with different options for different clients whose NFS-Ganesha export entry looks like: -``` -EXPORT { - Export_Id = 1; - Path = /export; - Pseudo = /nfs-share; - Protocols = 4; - Sectype = sys; - FSAL { - Name = VFS; - } - CLIENT { - Clients = 172.17.0.5; - Access_Type = RO; - Squash = root; - } - CLIENT { - Clients = 172.17.0.0/16, serverX; - Access_Type = RW; - Squash = none; - } -} -``` -the CRD instance will look like the following: -```yaml -apiVersion: rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: nfs-share - server: - allowedClients: - - name: host1 - clients: 172.17.0.5 - accessMode: ReadOnly - squash: root - - name: host2 - clients: - - 172.17.0.0/16 - - serverX - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: ebs-claim -``` - -2. For sharing multiple volumes using NFS, which will be shared as /share1 and /share2 by the NFS server whose NFS-Ganesha export entry looks like: -``` -EXPORT { - Export_Id = 1; - Path = /export; - Pseudo = /share1; - Protocols = 4; - Sectype = sys; - FSAL { - Name = VFS; - } - CLIENT { - Clients = all; - Access_Type = RO; - Squash = none; - } -} -EXPORT { - Export_Id = 2; - Path = /export2; - Pseudo = /share2; - Protocols = 4; - Sectype = sys; - FSAL { - Name = VFS; - } - CLIENT { - Clients = all; - Access_Type = RW; - Squash = none; - } -} -``` -the CRD instance will look like the following: -```yaml -apiVersion: rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-multi-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: share1 - server: - allowedClients: - - name: ebs-host - clients: all - accessMode: ReadOnly - squash: none - persistentVolumeClaim: - claimName: ebs-claim - - name: share2 - server: - allowedClients: - - name: ceph-host - clients: all - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: cephfs-claim -``` - -## Adding and Removing exports from an existing NFS server -Exports can be added and removed by updating the CRD using kubectl edit/replace -f rook-nfs.yaml - -## Client Access -The administrator creates a storage class. -Here is an example of NFS storage class for Example 1: -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-nfs -provisioner: nfs.rook.io/nfs -parameters: - server: nfs-vol - export: nfs-share -``` - -The user can use the NFS volume by creating a PVC. 
-Here is an example of NFS PVC -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: httpd-pv-claim - labels: - app: web -spec: - storageClassName: rook-nfs - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: web-server - labels: - app: web -spec: - template: - metadata: - labels: - app: web - tier: httpd - spec: - containers: - - image: httpd - name: httpd - ports: - - containerPort: 80 - name: httpd - volumeMounts: - - name: httpd-persistent-storage - mountPath: /var/www/html - volumes: - - name: httpd-persistent-storage - persistentVolumeClaim: - claimName: httpd-pv-claim ---- diff --git a/go.mod b/go.mod index 65881bb00..2fe516323 100644 --- a/go.mod +++ b/go.mod @@ -1,34 +1,26 @@ -module github.com/rook/rook +module github.com/rook/cassandra go 1.16 require ( - github.com/aws/aws-sdk-go v1.35.24 github.com/banzaicloud/k8s-objectmatcher v1.1.0 - github.com/ceph/go-ceph v0.10.1-0.20210729101705-11f319727ffb github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/csi-addons/volume-replication-operator v0.1.1-0.20210525040814-ab575a2879fb github.com/davecgh/go-spew v1.1.1 + github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-ini/ini v1.51.1 - github.com/google/go-cmp v0.5.5 github.com/google/uuid v1.1.2 - github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.1.0 - github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210818162813-3eee31c01875 - github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec - github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 - github.com/openshift/machine-api-operator v0.2.1-0.20190903202259-474e14e4965a github.com/pkg/errors v0.9.1 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.46.0 - github.com/prometheus-operator/prometheus-operator/pkg/client v0.46.0 github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 github.com/tevino/abool v1.2.0 github.com/yanniszark/go-nodetool v0.0.0-20191206125106-cd8f91fa16be - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - gopkg.in/ini.v1 v1.57.0 + golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 // indirect + google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e // indirect + google.golang.org/grpc v1.33.2 // indirect gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.21.2 k8s.io/apiextensions-apiserver v0.21.1 @@ -36,12 +28,8 @@ require ( k8s.io/apiserver v0.21.1 k8s.io/client-go v0.21.2 k8s.io/cloud-provider v0.21.1 - k8s.io/component-helpers v0.21.1 - k8s.io/kube-controller-manager v0.21.1 k8s.io/utils v0.0.0-20210527160623-6fdb442a123b sigs.k8s.io/controller-runtime v0.9.0 - sigs.k8s.io/kustomize/kyaml v0.10.17 - sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0 ) replace ( diff --git a/go.sum b/go.sum index fef8d50ba..40639e763 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,5 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.39.0/go.mod 
h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= @@ -35,29 +32,19 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -65,11 +52,8 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks 
v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= @@ -79,18 +63,9 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/IBM/keyprotect-go-client v0.5.1/go.mod h1:5TwDM/4FRJq1ZOlwQL1xFahLWQ3TveR88VmL1u3njyI= -github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= -github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -99,13 +74,6 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= -github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod 
h1:D73UAuEPckrDorYZdtlCu2ySOLuPB5W4rhIkmmc/XbI= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -113,101 +81,60 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= -github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= -github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/armon/go-metrics v0.3.1 h1:oNd9vmHdQuYICjy5hE2Ysz2rsIOBl4z7xA6IErlfd48= -github.com/armon/go-metrics v0.3.1/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e h1:h0gP0hBU6DsA5IQduhLWGOEfIUKzJS5hhXQBSgHuF/g= -github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.35.24 h1:U3GNTg8+7xSM6OAJ8zksiSM4bRqxBWmVwwehvOSNG3A= -github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= -github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/banzaicloud/k8s-objectmatcher v1.1.0 h1:KHWn9Oxh21xsaGKBHWElkaRrr4ypCDyrh15OB1zHtAw= github.com/banzaicloud/k8s-objectmatcher v1.1.0/go.mod h1:gGaElvgkqa0Lk1khRr+jel/nsCLfzhLnD3CEWozpk9k= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y= github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE= -github.com/ceph/go-ceph v0.10.1-0.20210729101705-11f319727ffb h1:rkflsGZM6dOf1GcbnPF3J0P72NwKVhqXgleFf3Nuqb4= -github.com/ceph/go-ceph v0.10.1-0.20210729101705-11f319727ffb/go.mod h1:mafFpf5Vg8Ai8Bd+FAMvKBHLmtdpTXdRP/TNq8XWegY= github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s= github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -220,32 +147,18 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= -github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M= -github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 
h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= -github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8= @@ -259,28 +172,17 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= -github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg= -github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= -github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod 
h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -289,35 +191,23 @@ github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ER github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.51.1 h1:/QG3cj23k5V8mOl4JnNzUNhc1kr/jzMiNsNuWKcx8gM= -github.com/go-ini/ini v1.51.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= -github.com/go-log/log v0.0.0-20181211034820-a514cf01a3eb/go.mod h1:4mBwpdRMFLiuXZDCwU2lKQFsoSCo72j3HqBK9d81N2M= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/analysis 
[go.sum hunks omitted: the remaining hunks in this section only delete checksum entries for dependencies this repository no longer requires, including the HashiCorp Vault and vault-plugin modules, libopenstorage/openstorage, the Portworx and OpenShift packages, the prometheus-operator client, kube-object-storage/lib-bucket-provisioner, and their transitive dependencies. No entries are added in this section; entries still in use are left unchanged.]
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1259,9 +799,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1269,7 +807,6 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1280,7 +817,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1309,16 +845,13 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1335,7 +868,6 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1350,7 +882,6 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1383,20 +914,13 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= -google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= -google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= 
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1412,11 +936,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -1424,14 +945,10 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1476,7 +993,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1485,28 +1001,15 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= @@ -1516,20 +1019,16 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1537,96 +1036,55 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.0.0-20190409092523-d687e77c8ae9/go.mod h1:FQEUn50aaytlU65qqBn/w+5ugllHwrBzKm7DzbnXdzE= -k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= k8s.io/api v0.15.7/go.mod h1:a/tUxscL+UxvYyA7Tj5DRc8ivYqJIO1Y5KDdlI6wSvo= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.19.1/go.mod h1:+u/k4/K/7vp4vsfdT7dyl8Oxk1F26Md4g5F26Tu85PU= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= -k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= -k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= k8s.io/apiextensions-apiserver v0.15.7/go.mod h1:ctb/NYtsiBt6CGN42Z+JrOkxi9nJYaKZYmatJ6SUy0Y= -k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= -k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= 
k8s.io/apiextensions-apiserver v0.21.1 h1:AA+cnsb6w7SZ1vD32Z+zdgfXdXY8X9uGX5bN6EoPEIo= k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= k8s.io/apimachinery v0.0.0-20181116115711-1b0702fe2927/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b/go.mod h1:FW86P8YXVLsbuplGMZeb20J3jYHscrDqw4jELaFJvRU= -k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= k8s.io/apimachinery v0.15.7/go.mod h1:Xc10RHc1U+F/e9GCloJ8QAeCGevSVP5xhOhqlE+e1kM= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.19.1/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= -k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= k8s.io/apiserver v0.15.7/go.mod h1:d5Dbyt588GbBtUnbx9fSK+pYeqgZa32op+I1BmXiNuE= -k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.21.1 h1:wTRcid53IhxhbFt4KTrFSw8tAncfr01EP91lzfcygVg= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= -k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= k8s.io/client-go v0.15.7/go.mod h1:QMNB76d3lKPvPQdOOnnxUF693C3hnCzUbC2umg70pWA= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.19.1/go.mod h1:AZOIVSI9UUtQPeJD3zJFp15CEhSjRgAuQP5PWRJrCIQ= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= -k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM= -k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0= k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= k8s.io/cloud-provider v0.21.1 h1:V7ro0ZuxMBNYVH4lJKxCdI+h2bQ7EApC5f7sQYrQLVE= k8s.io/cloud-provider v0.21.1/go.mod h1:GgiRu7hOsZh3+VqMMbfLJJS9ZZM9A8k/YiZG8zkWpX4= -k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= k8s.io/code-generator v0.15.7/go.mod h1:G8bQwmHm2eafm5bgtX67XDZQ8CWKSGu9DekI+yN4Y5I= 
k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.15.7/go.mod h1:iunfIII6uq3NC3S/EhBpKv8+eQ76vwlOYdFpyIeBk7g= -k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.21.1 h1:iLpj2btXbR326s/xNQWmPNGu0gaYSjzn7IN/5i28nQw= k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= -k8s.io/component-helpers v0.21.1 h1:jhi4lHGHOV6mbPqNfITVUoLC3kNFkBQQO1rDDpnThAw= -k8s.io/component-helpers v0.21.1/go.mod h1:FtC1flbiQlosHQrLrRUulnKxE4ajgWCGy/67fT2GRlQ= -k8s.io/controller-manager v0.21.1 h1:IFbukN4M0xl3OHEasNQ91h2MLEAMk3uQrBU4+Edka8w= k8s.io/controller-manager v0.21.1/go.mod h1:8ugs8DCcHqybiwdVERhnnyGoS5Ksq/ea1p2B0CosHyc= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -1634,33 +1092,22 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-controller-manager v0.21.1 h1:cqnAgS3Tf4PMlZNZwthYPCIaPB/7Z5KzazDtWAtAqCA= -k8s.io/kube-controller-manager 
v0.21.1/go.mod h1:zEzQfcDGMQFFFpeWXv5GdJKIDR00LB4wp+hKYeRw7yc= -k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20190923111123-69764acb6e8e/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -1669,32 +1116,21 @@ modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= 
-sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= sigs.k8s.io/controller-runtime v0.9.0 h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY= sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= -sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= -sigs.k8s.io/kustomize/kyaml v0.10.17 h1:4zrV0ym5AYa0e512q7K3Wp1u7mzoWW0xR3UHJcGWGIg= -sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= -sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0 h1:4kyxBJ/3fzLooWOZkx5NEO/pUN6woM9JBnHuyWzqkc8= -sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0/go.mod h1:DhZ52sQMJHW21+JXyA2LRUPRIxKnrNrwh+QFV+2tVA4= sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca h1:6dsH6AYQWbyZmtttJNe8Gq1cXOeS1BdV3eW37zHilAQ= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/images/Makefile b/images/Makefile index 6dd5e3e33..8d931ee26 100644 --- a/images/Makefile +++ b/images/Makefile @@ -26,17 +26,11 @@ cross: cross.linux_amd64 ## Build images used for cross building. cross.%: @$(MAKE) -C cross PLATFORM=$* -ceph.%: - @$(MAKE) -C ceph PLATFORM=$* - -nfs.%: - @$(MAKE) -C nfs PLATFORM=$* - cassandra.%: @$(MAKE) -C cassandra PLATFORM=$* -do.build.images.%: $(foreach i,$(IMAGES), $(i).%); +do.build.images.%: cassandra.%; do.build: do.build.images.$(PLATFORM) ; build.all: $(foreach p,$(PLATFORMS), do.build.images.$(p)) ; ## Build images for all platforms. diff --git a/images/ceph/Dockerfile b/images/ceph/Dockerfile deleted file mode 100644 index 5e66b9bbb..000000000 --- a/images/ceph/Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2016 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# see Makefile for the BASEIMAGE definition -FROM BASEIMAGE - -ARG ARCH -ARG TINI_VERSION - -# Run tini as PID 1 and avoid signal handling issues -RUN curl --fail -sSL -o /tini https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${ARCH} && \ - chmod +x /tini - -COPY rook rookflex toolbox.sh set-ceph-debug-level /usr/local/bin/ -COPY ceph-csi /etc/ceph-csi -COPY ceph-monitoring /etc/ceph-monitoring -COPY rook-external /etc/rook-external/ -COPY ceph-csv-templates /etc/ceph-csv-templates -ENTRYPOINT ["/tini", "--", "/usr/local/bin/rook"] -CMD [""] diff --git a/images/ceph/Makefile b/images/ceph/Makefile deleted file mode 100755 index 267fa68e8..000000000 --- a/images/ceph/Makefile +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2016 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include ../image.mk - -# ==================================================================================== -# Image Build Options - -ifeq ($(GOARCH),amd64) -CEPH_VERSION = v16.2.5-20210708 -else -CEPH_VERSION = v16.2.5-20210708 -endif -REGISTRY_NAME = quay.io -BASEIMAGE = $(REGISTRY_NAME)/ceph/ceph-$(GOARCH):$(CEPH_VERSION) -CEPH_IMAGE = $(BUILD_REGISTRY)/ceph-$(GOARCH) -OPERATOR_SDK_VERSION = v0.17.1 -# TODO: update to yq v4 - v3 end of life in Aug 2021 ; v4 removes the 'yq delete' cmd and changes syntax -YQ_VERSION = 3.3.0 -GOHOST := GOOS=$(GOHOSTOS) GOARCH=$(GOHOSTARCH) go - -TEMP := $(shell mktemp -d) - -ifeq ($(HOST_PLATFORM),linux_amd64) -OPERATOR_SDK_PLATFORM = x86_64-linux-gnu -INCLUDE_CSV_TEMPLATES = true -endif -ifeq ($(HOST_PLATFORM),darwin_amd64) -OPERATOR_SDK_PLATFORM = x86_64-apple-darwin -INCLUDE_CSV_TEMPLATES = true -endif -ifneq ($(INCLUDE_CSV_TEMPLATES),true) -$(info ) -$(info NOT INCLUDING OLM/CSV TEMPLATES!) 
-$(info ) -endif - -OPERATOR_SDK := $(TOOLS_HOST_DIR)/operator-sdk-$(OPERATOR_SDK_VERSION) -YQ := $(TOOLS_HOST_DIR)/yq-$(YQ_VERSION) -export OPERATOR_SDK YQ - -# ==================================================================================== -# Build Rook - -do.build: - @echo === container build $(CEPH_IMAGE) - @cp Dockerfile $(TEMP) - @cp toolbox.sh $(TEMP) - @cp set-ceph-debug-level $(TEMP) - @cp $(OUTPUT_DIR)/bin/linux_$(GOARCH)/rook $(TEMP) - @cp $(OUTPUT_DIR)/bin/linux_$(GOARCH)/rookflex $(TEMP) - @cp -r ../../cluster/examples/kubernetes/ceph/csi/template $(TEMP)/ceph-csi - @cp -r ../../cluster/examples/kubernetes/ceph/monitoring $(TEMP)/ceph-monitoring - @mkdir -p $(TEMP)/rook-external/test-data - @cp ../../cluster/examples/kubernetes/ceph/create-external-cluster-resources.* $(TEMP)/rook-external/ - @cp ../../cluster/examples/kubernetes/ceph/test-data/ceph-status-out $(TEMP)/rook-external/test-data/ -ifeq ($(INCLUDE_CSV_TEMPLATES),true) - @$(MAKE) CSV_TEMPLATE_DIR=$(TEMP) generate-csv-templates - @cp -r $(TEMP)/cluster/olm/ceph/templates $(TEMP)/ceph-csv-templates -else - mkdir $(TEMP)/ceph-csv-templates -endif - @cd $(TEMP) && $(SED_IN_PLACE) 's|BASEIMAGE|$(BASEIMAGE)|g' Dockerfile - @if [ -z "$(BUILD_CONTAINER_IMAGE)" ]; then\ - $(DOCKERCMD) build $(BUILD_ARGS) \ - --build-arg ARCH=$(GOARCH) \ - --build-arg TINI_VERSION=$(TINI_VERSION) \ - -t $(CEPH_IMAGE) \ - $(TEMP);\ - fi - @rm -fr $(TEMP) - -# generate CSV template files into the directory defined by the env var CSV_TEMPLATE_DIR -# CSV_TEMPLATE_DIR will be created if it doesn't already exist -generate-csv-templates: $(OPERATOR_SDK) $(YQ) ## Generate CSV templates for OLM into CSV_TEMPLATE_DIR - @if [[ -z "$(CSV_TEMPLATE_DIR)" ]]; then echo "CSV_TEMPLATE_DIR is not set"; exit 1; fi - @# first, copy the existing CRDs and OLM catalog directory to CSV_TEMPLATE_DIR - @# then, generate or copy all prerequisites into CSV_TEMPLATE_DIR (e.g., CRDs) - @# finally, generate the templates in-place using CSV_TEMPLATE_DIR as a staging dir - @mkdir -p $(CSV_TEMPLATE_DIR) - @cp -a ../../cluster $(CSV_TEMPLATE_DIR)/cluster - @set -eE;\ - BEFORE_GEN_CRD_SIZE=$$(wc -l < ../../cluster/examples/kubernetes/ceph/crds.yaml);\ - $(MAKE) -C ../.. NO_OB_OBC_VOL_GEN=true MAX_DESC_LEN=0 BUILD_CRDS_INTO_DIR=$(CSV_TEMPLATE_DIR) crds;\ - AFTER_GEN_CRD_SIZE=$$(wc -l < $(CSV_TEMPLATE_DIR)/cluster/examples/kubernetes/ceph/crds.yaml);\ - if [ "$$BEFORE_GEN_CRD_SIZE" -le "$$AFTER_GEN_CRD_SIZE" ]; then\ - echo "the new crd file must be smaller since the description fields were stripped!";\ - echo "length before $$BEFORE_GEN_CRD_SIZE";\ - echo "length after $$AFTER_GEN_CRD_SIZE";\ - exit 1;\ - fi - @OLM_CATALOG_DIR=$(CSV_TEMPLATE_DIR)/cluster/olm/ceph ../../cluster/olm/ceph/generate-rook-csv-templates.sh - @echo " === Generated CSV templates can be found at $(CSV_TEMPLATE_DIR)/cluster/olm/ceph/templates" - -$(YQ): - @echo === installing yq $(GOHOST) - @mkdir -p $(TOOLS_HOST_DIR) - @curl -JL https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(HOST_PLATFORM) -o $(YQ) - @chmod +x $(YQ) - -$(OPERATOR_SDK): - @echo === installing operator-sdk $(GOHOST) - @mkdir -p $(TOOLS_HOST_DIR) - @curl -JL -o $(TOOLS_HOST_DIR)/operator-sdk-$(OPERATOR_SDK_VERSION) \ - https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk-$(OPERATOR_SDK_VERSION)-$(OPERATOR_SDK_PLATFORM) - @chmod +x $(OPERATOR_SDK) - -csv: $(OPERATOR_SDK) $(YQ) ## Generate a CSV file for OLM. - @echo Generating CSV manifests - @cd ../.. 
&& cluster/olm/ceph/generate-rook-csv.sh $(CSV_VERSION) $(CSV_PLATFORM) $(ROOK_OP_VERSION) - -csv-clean: $(OPERATOR_SDK) $(YQ) ## Remove existing OLM files. - @rm -fr ../../cluster/olm/ceph/deploy/* ../../cluster/olm/ceph/templates/* diff --git a/images/ceph/set-ceph-debug-level b/images/ceph/set-ceph-debug-level deleted file mode 100755 index d792961f7..000000000 --- a/images/ceph/set-ceph-debug-level +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -if [ -z "$1" ]; then - echo "no debug level passed choose between 0 and 20" - exit 1 -fi - -CEPH_DEBUG_LEVEL=$1 -CEPH_DEBUG_FLAG=( - lockdep - context - crush - mds - mds_balancer - mds_locker - mds_log - mds_log_expire - mds_migrator - buffer - timer - filer - striper - objecter - rados - rbd - rbd_mirror - rbd_replay - journaler - objectcacher - client - osd - optracker - objclass - ms - mon - monc - paxos - tp - auth - crypto - finisher - reserver - heartbeatmap - perfcounter - rgw - rgw_sync - civetweb - javaclient - asok - throttle - refs - compressor - bluestore - bluefs - bdev - kstore - rocksdb - leveldb - memdb - fuse - mgr - mgrc - dpdk - eventtrace -) - -############# -# FUNCTIONS # -############# -check() { - ok_to_run=1 - if [[ "$CEPH_DEBUG_LEVEL" =~ ^[0-9]+$ ]]; then - if [ "$CEPH_DEBUG_LEVEL" -ge 0 ] && [ "$CEPH_DEBUG_LEVEL" -le 20 ]; then - ok_to_run=0 - fi - elif [[ "$CEPH_DEBUG_LEVEL" == "default" ]]; then - ok_to_run=0 - fi -} - -exec_ceph_command() { - local debug_level=$1 - local action=set - if [[ "$debug_level" == "default" ]]; then - action="rm" - fi - - # exec command - for flag in "${CEPH_DEBUG_FLAG[@]}"; do - ARGS=("$action" global debug_"$flag") - if [[ "$debug_level" != "default" ]]; then - ARGS+=("$debug_level") - fi - # put stdout in /dev/null since increase debug log will overflow the terminal - echo "ceph config ${ARGS[*]}" - ceph config "${ARGS[@]}" &> /dev/null & pids+=($!) - - done - echo "waiting for all the new logging configuration to be applied, this can take a few seconds" - wait "${pids[@]}" -} - -######## -# MAIN # -######## -check -if [ "$ok_to_run" -eq 0 ]; then - exec_ceph_command "$CEPH_DEBUG_LEVEL" -else - echo "Wrong debug level $CEPH_DEBUG_LEVEL" - echo "MUST be integer between 0 and 20 or 'default' to reset all values" - exit 1 -fi diff --git a/images/ceph/toolbox.sh b/images/ceph/toolbox.sh deleted file mode 100755 index 78f631b3a..000000000 --- a/images/ceph/toolbox.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash -e - -# Copyright 2016 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -CEPH_CONFIG="/etc/ceph/ceph.conf" -MON_CONFIG="/etc/rook/mon-endpoints" -KEYRING_FILE="/etc/ceph/keyring" - -# create a ceph config file in its default location so ceph/rados tools can be used -# without specifying any arguments -write_endpoints() { - endpoints=$(cat ${MON_CONFIG}) - - # filter out the mon names - # external cluster can have numbers or hyphens in mon names, handling them in regex - # shellcheck disable=SC2001 - mon_endpoints=$(echo "${endpoints}"| sed 's/[a-z0-9_-]\+=//g') - - DATE=$(date) - echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}" - cat < ${CEPH_CONFIG} -[global] -mon_host = ${mon_endpoints} - -[client.admin] -keyring = ${KEYRING_FILE} -EOF -} - -# watch the endpoints config file and update if the mon endpoints ever change -watch_endpoints() { - # get the timestamp for the target of the soft link - real_path=$(realpath ${MON_CONFIG}) - initial_time=$(stat -c %Z "${real_path}") - while true; do - real_path=$(realpath ${MON_CONFIG}) - latest_time=$(stat -c %Z "${real_path}") - - if [[ "${latest_time}" != "${initial_time}" ]]; then - write_endpoints - initial_time=${latest_time} - fi - - sleep 10 - done -} - -# create the keyring file -cat < ${KEYRING_FILE} -[${ROOK_CEPH_USERNAME}] -key = ${ROOK_CEPH_SECRET} -EOF - -# write the initial config file -write_endpoints - -# continuously update the mon endpoints if they fail over -if [ "$1" != "--skip-watch" ]; then - watch_endpoints -fi diff --git a/images/cross/run.sh b/images/cross/run.sh index 038d9013c..391bcb7a0 100755 --- a/images/cross/run.sh +++ b/images/cross/run.sh @@ -14,9 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARGS="$@" +ARGS=( "$@" ) if [ $# -eq 0 ]; then - ARGS=/bin/bash + ARGS=( /bin/bash ) fi BUILDER_USER=${BUILDER_USER:-rook} @@ -24,11 +24,11 @@ BUILDER_GROUP=${BUILDER_GROUP:-rook} BUILDER_UID=${BUILDER_UID:-1000} BUILDER_GID=${BUILDER_GID:-1000} -groupadd -o -g $BUILDER_GID $BUILDER_GROUP 2> /dev/null -useradd -o -m -g $BUILDER_GID -u $BUILDER_UID $BUILDER_USER 2> /dev/null +groupadd -o -g "$BUILDER_GID" "$BUILDER_GROUP" 2> /dev/null +useradd -o -m -g "$BUILDER_GID" -u "$BUILDER_UID" "$BUILDER_USER" 2> /dev/null echo "$BUILDER_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers export HOME=/home/${BUILDER_USER} echo "127.0.0.1 $(cat /etc/hostname)" >> /etc/hosts [[ -S /var/run/docker.sock ]] && chmod 666 /var/run/docker.sock -chown -R $BUILDER_UID:$BUILDER_GID $HOME -exec chpst -u :$BUILDER_UID:$BUILDER_GID ${ARGS} +chown -R "$BUILDER_UID":"$BUILDER_GID" "$HOME" +exec chpst -u :"$BUILDER_UID":"$BUILDER_GID" "${ARGS[@]}" diff --git a/images/nfs/Dockerfile b/images/nfs/Dockerfile deleted file mode 100644 index 4af6816a2..000000000 --- a/images/nfs/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2018 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#Portions of this file came from https://github.com/mitcdh/docker-nfs-ganesha/blob/master/Dockerfile, which uses the same license. - -FROM NFS_BASEIMAGE -# Build ganesha from source, installing deps and removing them in one line. -# Why? -# 1. Root_Id_Squash, only present in >= 2.4.0.3 which is not yet packaged -# 2. Set NFS_V4_RECOV_ROOT to /export -# 3. Use device major/minor as fsid major/minor to work on OverlayFS - -RUN DEBIAN_FRONTEND=noninteractive \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 10353E8834DC57CA \ - && echo "deb http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-3.0/ubuntu xenial main" > /etc/apt/sources.list.d/nfs-ganesha.list \ - && echo "deb http://ppa.launchpad.net/nfs-ganesha/libntirpc-3.0/ubuntu xenial main" > /etc/apt/sources.list.d/libntirpc.list \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 13e01b7b3fe869a9 \ - && echo "deb http://ppa.launchpad.net/gluster/glusterfs-6/ubuntu xenial main" > /etc/apt/sources.list.d/glusterfs.list \ - && apt-get update \ - && apt-get install -y netbase nfs-common dbus nfs-ganesha nfs-ganesha-vfs glusterfs-common xfsprogs \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ - && mkdir -p /run/rpcbind /export /var/run/dbus \ - && touch /run/rpcbind/rpcbind.xdr /run/rpcbind/portmap.xdr \ - && chmod 755 /run/rpcbind/* \ - && chown messagebus:messagebus /var/run/dbus - -EXPOSE 2049 38465-38467 662 111/udp 111 - -COPY rook /usr/local/bin/ - -ENTRYPOINT ["/usr/local/bin/rook"] -CMD [""] diff --git a/images/nfs/Makefile b/images/nfs/Makefile deleted file mode 100755 index 8f164a0bb..000000000 --- a/images/nfs/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2018 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -include ../image.mk - -# ==================================================================================== -# Image Build Options - -NFS_IMAGE = $(BUILD_REGISTRY)/nfs-$(GOARCH) - -NFS_BASE ?= ubuntu:xenial - -ifeq ($(GOARCH),amd64) -NFS_BASEIMAGE = $(NFS_BASE) -else ifeq ($(GOARCH),arm64) -NFS_BASEIMAGE = arm64v8/$(NFS_BASE) -endif - -TEMP := $(shell mktemp -d) - -# ==================================================================================== -# Build Rook NFS - -# since this is a leaf image we avoid leaving around a lot of dangling images -# by removing the last build of the final nfs image -OLD_IMAGE_ID := $(shell $(DOCKERCMD) images -q $(NFS_IMAGE)) -CURRENT_IMAGE_ID := $$($(DOCKERCMD) images -q $(NFS_IMAGE)) -IMAGE_FILENAME := $(IMAGE_OUTPUT_DIR)/nfs.tar.gz - -do.build: - @echo === container build $(NFS_IMAGE) - @cp Dockerfile $(TEMP) - @cp $(OUTPUT_DIR)/bin/linux_$(GOARCH)/rook $(TEMP) - @cd $(TEMP) && $(SED_IN_PLACE) 's|NFS_BASEIMAGE|$(NFS_BASEIMAGE)|g' Dockerfile - @$(DOCKERCMD) build $(BUILD_ARGS) \ - -t $(NFS_IMAGE) \ - $(TEMP) - @[ "$(OLD_IMAGE_ID)" != "$(CURRENT_IMAGE_ID)" ] && [ -n "$(OLD_IMAGE_ID)" ] && $(DOCKERCMD) rmi $(OLD_IMAGE_ID) || true - @if [ ! -e "$(IMAGE_FILENAME)" ] || [ "$(OLD_IMAGE_ID)" != "$(CURRENT_IMAGE_ID)" ] || [ -n "$(OLD_IMAGE_ID)" ]; then \ - echo === saving image $(NFS_IMAGE); \ - mkdir -p $(IMAGE_OUTPUT_DIR); \ - $(DOCKERCMD) save $(NFS_IMAGE) | gzip -c > $(IMAGE_FILENAME); \ - fi - @rm -fr $(TEMP) diff --git a/pkg/apis/cassandra.rook.io/v1alpha1/register.go b/pkg/apis/cassandra.rook.io/v1alpha1/register.go index aa04a8c97..4d4d42d3f 100644 --- a/pkg/apis/cassandra.rook.io/v1alpha1/register.go +++ b/pkg/apis/cassandra.rook.io/v1alpha1/register.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/rook/rook/pkg/apis/cassandra.rook.io" + cassandrarookio "github.com/rook/cassandra/pkg/apis/cassandra.rook.io" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/apis/cassandra.rook.io/v1alpha1/types.go b/pkg/apis/cassandra.rook.io/v1alpha1/types.go index 99881d30f..018e0c454 100644 --- a/pkg/apis/cassandra.rook.io/v1alpha1/types.go +++ b/pkg/apis/cassandra.rook.io/v1alpha1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/rook/rook/pkg/apis/rook.io" + "github.com/rook/cassandra/pkg/apis/rook.io" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/apis/cassandra.rook.io/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/cassandra.rook.io/v1alpha1/zz_generated.deepcopy.go index c1b46e0bb..7fb7742a8 100644 --- a/pkg/apis/cassandra.rook.io/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/cassandra.rook.io/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. 
package v1alpha1 import ( - rookio "github.com/rook/rook/pkg/apis/rook.io" + rookio "github.com/rook/cassandra/pkg/apis/rook.io" v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/pkg/apis/ceph.rook.io/register.go b/pkg/apis/ceph.rook.io/register.go deleted file mode 100644 index d722c3836..000000000 --- a/pkg/apis/ceph.rook.io/register.go +++ /dev/null @@ -1,5 +0,0 @@ -package cephrookio - -const ( - CustomResourceGroupName = "ceph.rook.io" -) diff --git a/pkg/apis/ceph.rook.io/v1/annotations.go b/pkg/apis/ceph.rook.io/v1/annotations.go deleted file mode 100644 index 07d7f1f19..000000000 --- a/pkg/apis/ceph.rook.io/v1/annotations.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - rook "github.com/rook/rook/pkg/apis/rook.io" -) - -// AnnotationsSpec is the main spec annotation for all daemons -// +kubebuilder:pruning:PreserveUnknownFields -// +nullable -type AnnotationsSpec map[rook.KeyType]rook.Annotations - -func (a AnnotationsSpec) All() rook.Annotations { - return a[KeyAll] -} - -// GetMgrAnnotations returns the Annotations for the MGR service -func GetMgrAnnotations(a AnnotationsSpec) rook.Annotations { - return mergeAllAnnotationsWithKey(a, KeyMgr) -} - -// GetMonAnnotations returns the Annotations for the MON service -func GetMonAnnotations(a AnnotationsSpec) rook.Annotations { - return mergeAllAnnotationsWithKey(a, KeyMon) -} - -// GetOSDPrepareAnnotations returns the annotations for the OSD service -func GetOSDPrepareAnnotations(a AnnotationsSpec) rook.Annotations { - return mergeAllAnnotationsWithKey(a, KeyOSDPrepare) -} - -// GetOSDAnnotations returns the annotations for the OSD service -func GetOSDAnnotations(a AnnotationsSpec) rook.Annotations { - return mergeAllAnnotationsWithKey(a, KeyOSD) -} - -// GetCleanupAnnotations returns the Annotations for the cleanup job -func GetCleanupAnnotations(a AnnotationsSpec) rook.Annotations { - return mergeAllAnnotationsWithKey(a, KeyCleanup) -} - -func mergeAllAnnotationsWithKey(a AnnotationsSpec, name rook.KeyType) rook.Annotations { - all := a.All() - if all != nil { - return all.Merge(a[name]) - } - return a[name] -} diff --git a/pkg/apis/ceph.rook.io/v1/annotations_test.go b/pkg/apis/ceph.rook.io/v1/annotations_test.go deleted file mode 100644 index 6ee49f967..000000000 --- a/pkg/apis/ceph.rook.io/v1/annotations_test.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/ghodss/yaml" - "github.com/stretchr/testify/assert" -) - -func TestCephAnnotationsMerge(t *testing.T) { - // No annotations defined - testAnnotations := AnnotationsSpec{} - a := GetOSDAnnotations(testAnnotations) - assert.Nil(t, a) - - // Only a specific component annotations without "all" - testAnnotations = AnnotationsSpec{ - "mgr": {"mgrkey": "mgrval"}, - "mon": {"monkey": "monval"}, - "osd": {"osdkey": "osdval"}, - "rgw": {"rgwkey": "rgwval"}, - "rbdmirror": {"rbdmirrorkey": "rbdmirrorval"}, - } - a = GetMgrAnnotations(testAnnotations) - assert.Equal(t, "mgrval", a["mgrkey"]) - assert.Equal(t, 1, len(a)) - a = GetMonAnnotations(testAnnotations) - assert.Equal(t, "monval", a["monkey"]) - assert.Equal(t, 1, len(a)) - a = GetOSDAnnotations(testAnnotations) - assert.Equal(t, "osdval", a["osdkey"]) - assert.Equal(t, 1, len(a)) - - // No annotations matching the component - testAnnotations = AnnotationsSpec{ - "mgr": {"mgrkey": "mgrval"}, - } - a = GetMonAnnotations(testAnnotations) - assert.Nil(t, a) - - // Merge with "all" - testAnnotations = AnnotationsSpec{ - "all": {"allkey1": "allval1", "allkey2": "allval2"}, - "mgr": {"mgrkey": "mgrval"}, - } - a = GetMonAnnotations(testAnnotations) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 2, len(a)) - a = GetMgrAnnotations(testAnnotations) - assert.Equal(t, "mgrval", a["mgrkey"]) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 3, len(a)) -} - -func TestAnnotationsSpec(t *testing.T) { - specYaml := []byte(` -mgr: - foo: bar - hello: world -mon: -`) - - // convert the raw spec yaml into JSON - rawJSON, err := yaml.YAMLToJSON(specYaml) - assert.Nil(t, err) - - // unmarshal the JSON into a strongly typed annotations spec object - var annotations AnnotationsSpec - err = json.Unmarshal(rawJSON, &annotations) - assert.Nil(t, err) - - // the unmarshalled annotations spec should equal the expected spec below - expected := AnnotationsSpec{ - "mgr": map[string]string{ - "foo": "bar", - "hello": "world", - }, - "mon": nil, - } - assert.Equal(t, expected, annotations) -} diff --git a/pkg/apis/ceph.rook.io/v1/cleanup.go b/pkg/apis/ceph.rook.io/v1/cleanup.go deleted file mode 100644 index e17e82aee..000000000 --- a/pkg/apis/ceph.rook.io/v1/cleanup.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -const ( - // SanitizeDataSourceZero uses /dev/zero as sanitize source - SanitizeDataSourceZero SanitizeDataSourceProperty = "zero" - - // SanitizeDataSourceRandom uses `shred's default entropy source - SanitizeDataSourceRandom SanitizeDataSourceProperty = "random" - - // SanitizeMethodComplete will sanitize everything on the disk - SanitizeMethodComplete SanitizeMethodProperty = "complete" - - // SanitizeMethodQuick will sanitize metadata only on the disk - SanitizeMethodQuick SanitizeMethodProperty = "quick" - - // DeleteDataDirOnHostsConfirmation represents the validation to destroy dataDirHostPath - DeleteDataDirOnHostsConfirmation CleanupConfirmationProperty = "yes-really-destroy-data" -) - -// HasDataDirCleanPolicy returns whether the cluster has a data dir policy -func (c *CleanupPolicySpec) HasDataDirCleanPolicy() bool { - return c.Confirmation == DeleteDataDirOnHostsConfirmation -} - -func (c *SanitizeMethodProperty) String() string { - return string(*c) -} - -func (c *SanitizeDataSourceProperty) String() string { - return string(*c) -} diff --git a/pkg/apis/ceph.rook.io/v1/cluster.go b/pkg/apis/ceph.rook.io/v1/cluster.go deleted file mode 100644 index a5c7af4a2..000000000 --- a/pkg/apis/ceph.rook.io/v1/cluster.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "reflect" - "strconv" - - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" -) - -// compile-time assertions ensures CephCluster implements webhook.Validator so a webhook builder -// will be registered for the validating webhook. 
-var _ webhook.Validator = &CephCluster{} - -func (c *ClusterSpec) IsStretchCluster() bool { - return c.Mon.StretchCluster != nil && len(c.Mon.StretchCluster.Zones) > 0 -} - -func (c *CephCluster) ValidateCreate() error { - logger.Infof("validate create cephcluster %q", c.ObjectMeta.Name) - //If external mode enabled, then check if other fields are empty - if c.Spec.External.Enable { - if c.Spec.Mon != (MonSpec{}) || c.Spec.Dashboard != (DashboardSpec{}) || !reflect.DeepEqual(c.Spec.Monitoring, (MonitoringSpec{})) || c.Spec.DisruptionManagement != (DisruptionManagementSpec{}) || len(c.Spec.Mgr.Modules) > 0 || len(c.Spec.Network.Provider) > 0 || len(c.Spec.Network.Selectors) > 0 { - return errors.New("invalid create : external mode enabled cannot have mon,dashboard,monitoring,network,disruptionManagement,storage fields in CR") - } - } - return nil -} - -func (c *CephCluster) ValidateUpdate(old runtime.Object) error { - logger.Infof("validate update cephcluster %q", c.ObjectMeta.Name) - occ := old.(*CephCluster) - return validateUpdatedCephCluster(c, occ) -} - -func (c *CephCluster) ValidateDelete() error { - return nil -} - -func validateUpdatedCephCluster(updatedCephCluster *CephCluster, found *CephCluster) error { - if updatedCephCluster.Spec.Mon.Count > 0 && updatedCephCluster.Spec.Mon.Count%2 == 0 { - return errors.Errorf("mon count %d cannot be even, must be odd to support a healthy quorum", updatedCephCluster.Spec.Mon.Count) - } - - if updatedCephCluster.Spec.DataDirHostPath != found.Spec.DataDirHostPath { - return errors.Errorf("invalid update: DataDirHostPath change from %q to %q is not allowed", found.Spec.DataDirHostPath, updatedCephCluster.Spec.DataDirHostPath) - } - - if updatedCephCluster.Spec.Network.HostNetwork != found.Spec.Network.HostNetwork { - return errors.Errorf("invalid update: HostNetwork change from %q to %q is not allowed", strconv.FormatBool(found.Spec.Network.HostNetwork), strconv.FormatBool(updatedCephCluster.Spec.Network.HostNetwork)) - } - - if updatedCephCluster.Spec.Network.Provider != found.Spec.Network.Provider { - return errors.Errorf("invalid update: Provider change from %q to %q is not allowed", found.Spec.Network.Provider, updatedCephCluster.Spec.Network.Provider) - } - - for i, storageClassDeviceSet := range updatedCephCluster.Spec.Storage.StorageClassDeviceSets { - if storageClassDeviceSet.Encrypted != found.Spec.Storage.StorageClassDeviceSets[i].Encrypted { - return errors.Errorf("invalid update: StorageClassDeviceSet %q encryption change from %t to %t is not allowed", storageClassDeviceSet.Name, found.Spec.Storage.StorageClassDeviceSets[i].Encrypted, storageClassDeviceSet.Encrypted) - } - } - - return nil -} - -func (c *CephCluster) GetStatusConditions() *[]Condition { - return &c.Status.Conditions -} diff --git a/pkg/apis/ceph.rook.io/v1/cluster_test.go b/pkg/apis/ceph.rook.io/v1/cluster_test.go deleted file mode 100644 index 6fffb9ff9..000000000 --- a/pkg/apis/ceph.rook.io/v1/cluster_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func Test_validateUpdatedCephCluster(t *testing.T) { - type args struct { - updatedCephCluster *CephCluster - found *CephCluster - } - tests := []struct { - name string - args args - wantErr bool - }{ - {"everything is ok", args{&CephCluster{}, &CephCluster{}}, false}, - {"good mon count", args{&CephCluster{Spec: ClusterSpec{Mon: MonSpec{Count: 1}}}, &CephCluster{}}, false}, - {"even mon count", args{&CephCluster{Spec: ClusterSpec{Mon: MonSpec{Count: 2}}}, &CephCluster{}}, true}, - {"good mon count", args{&CephCluster{Spec: ClusterSpec{Mon: MonSpec{Count: 3}}}, &CephCluster{}}, false}, - {"changed DataDirHostPath", args{&CephCluster{Spec: ClusterSpec{DataDirHostPath: "foo"}}, &CephCluster{Spec: ClusterSpec{DataDirHostPath: "bar"}}}, true}, - {"changed HostNetwork", args{&CephCluster{Spec: ClusterSpec{Network: NetworkSpec{HostNetwork: false}}}, &CephCluster{Spec: ClusterSpec{Network: NetworkSpec{HostNetwork: true}}}}, true}, - {"changed storageClassDeviceSet encryption", args{&CephCluster{Spec: ClusterSpec{Storage: StorageScopeSpec{StorageClassDeviceSets: []StorageClassDeviceSet{{Name: "foo", Encrypted: false}}}}}, &CephCluster{Spec: ClusterSpec{Storage: StorageScopeSpec{StorageClassDeviceSets: []StorageClassDeviceSet{{Name: "foo", Encrypted: true}}}}}}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := validateUpdatedCephCluster(tt.args.updatedCephCluster, tt.args.found); (err != nil) != tt.wantErr { - t.Errorf("validateUpdatedCephCluster() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestCephClusterValidateCreate(t *testing.T) { - c := &CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph", - }, - Spec: ClusterSpec{ - DataDirHostPath: "/var/lib/rook", - }, - } - err := c.ValidateCreate() - assert.NoError(t, err) - c.Spec.External.Enable = true - c.Spec.Monitoring = MonitoringSpec{ - Enabled: true, - RulesNamespace: "rook-ceph", - } - err = c.ValidateCreate() - assert.Error(t, err) -} - -func TestCephClusterValidateUpdate(t *testing.T) { - c := &CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph", - }, - Spec: ClusterSpec{ - DataDirHostPath: "/var/lib/rook", - }, - } - err := c.ValidateCreate() - assert.NoError(t, err) - - // Updating the CRD specs with invalid values - uc := c.DeepCopy() - uc.Spec.DataDirHostPath = "var/rook" - err = uc.ValidateUpdate(c) - assert.Error(t, err) -} diff --git a/pkg/apis/ceph.rook.io/v1/doc.go b/pkg/apis/ceph.rook.io/v1/doc.go deleted file mode 100644 index b8774f976..000000000 --- a/pkg/apis/ceph.rook.io/v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register - -// Package v1 is the v1 version of the API. 
-// +groupName=ceph.rook.io -package v1 diff --git a/pkg/apis/ceph.rook.io/v1/keys.go b/pkg/apis/ceph.rook.io/v1/keys.go deleted file mode 100644 index 9f2fc2a53..000000000 --- a/pkg/apis/ceph.rook.io/v1/keys.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - rookcore "github.com/rook/rook/pkg/apis/rook.io" -) - -const ( - KeyAll = "all" - KeyMds rookcore.KeyType = "mds" - KeyMon rookcore.KeyType = "mon" - KeyMonArbiter rookcore.KeyType = "arbiter" - KeyMgr rookcore.KeyType = "mgr" - KeyOSDPrepare rookcore.KeyType = "prepareosd" - KeyOSD rookcore.KeyType = "osd" - KeyCleanup rookcore.KeyType = "cleanup" - KeyMonitoring rookcore.KeyType = "monitoring" -) diff --git a/pkg/apis/ceph.rook.io/v1/labels.go b/pkg/apis/ceph.rook.io/v1/labels.go deleted file mode 100644 index aed8af9ae..000000000 --- a/pkg/apis/ceph.rook.io/v1/labels.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - rook "github.com/rook/rook/pkg/apis/rook.io" -) - -// LabelsSpec is the main spec label for all daemons -type LabelsSpec map[rook.KeyType]rook.Labels - -func (a LabelsSpec) All() rook.Labels { - return a[KeyAll] -} - -// GetMgrLabels returns the Labels for the MGR service -func GetMgrLabels(a LabelsSpec) rook.Labels { - return mergeAllLabelsWithKey(a, KeyMgr) -} - -// GetMonLabels returns the Labels for the MON service -func GetMonLabels(a LabelsSpec) rook.Labels { - return mergeAllLabelsWithKey(a, KeyMon) -} - -// GetOSDPrepareLabels returns the Labels for the OSD prepare job -func GetOSDPrepareLabels(a LabelsSpec) rook.Labels { - return mergeAllLabelsWithKey(a, KeyOSDPrepare) -} - -// GetOSDLabels returns the Labels for the OSD service -func GetOSDLabels(a LabelsSpec) rook.Labels { - return mergeAllLabelsWithKey(a, KeyOSD) -} - -// GetCleanupLabels returns the Labels for the cleanup job -func GetCleanupLabels(a LabelsSpec) rook.Labels { - return mergeAllLabelsWithKey(a, KeyCleanup) -} - -// GetMonitoringLabels returns the Labels for monitoring resources -func GetMonitoringLabels(a LabelsSpec) rook.Labels { - return mergeAllLabelsWithKey(a, KeyMonitoring) -} - -func mergeAllLabelsWithKey(a LabelsSpec, name rook.KeyType) rook.Labels { - all := a.All() - if all != nil { - return all.Merge(a[name]) - } - return a[name] -} diff --git a/pkg/apis/ceph.rook.io/v1/labels_test.go b/pkg/apis/ceph.rook.io/v1/labels_test.go deleted file mode 100644 index 546f99962..000000000 --- a/pkg/apis/ceph.rook.io/v1/labels_test.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/ghodss/yaml" - "github.com/stretchr/testify/assert" -) - -func TestCephLabelsMerge(t *testing.T) { - // No Labels defined - testLabels := LabelsSpec{} - a := GetOSDLabels(testLabels) - assert.Nil(t, a) - - // Only a specific component labels without "all" - testLabels = LabelsSpec{ - "mgr": {"mgrkey": "mgrval"}, - "mon": {"monkey": "monval"}, - "osd": {"osdkey": "osdval"}, - "rgw": {"rgwkey": "rgwval"}, - "rbdmirror": {"rbdmirrorkey": "rbdmirrorval"}, - } - a = GetMgrLabels(testLabels) - assert.Equal(t, "mgrval", a["mgrkey"]) - assert.Equal(t, 1, len(a)) - a = GetMonLabels(testLabels) - assert.Equal(t, "monval", a["monkey"]) - assert.Equal(t, 1, len(a)) - a = GetOSDLabels(testLabels) - assert.Equal(t, "osdval", a["osdkey"]) - assert.Equal(t, 1, len(a)) - - // No Labels matching the component - testLabels = LabelsSpec{ - "mgr": {"mgrkey": "mgrval"}, - } - a = GetMonLabels(testLabels) - assert.Nil(t, a) - - // Merge with "all" - testLabels = LabelsSpec{ - "all": {"allkey1": "allval1", "allkey2": "allval2"}, - "mgr": {"mgrkey": "mgrval"}, - } - a = GetMonLabels(testLabels) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 2, len(a)) - a = GetMgrLabels(testLabels) - assert.Equal(t, "mgrval", a["mgrkey"]) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 3, len(a)) -} - -func TestLabelsSpec(t *testing.T) { - specYaml := []byte(` -mgr: - foo: bar - hello: world -mon: -`) - - // convert the raw spec yaml into JSON - rawJSON, err := yaml.YAMLToJSON(specYaml) - assert.Nil(t, err) - - // unmarshal the JSON into a strongly typed Labels spec object - var Labels LabelsSpec - err = json.Unmarshal(rawJSON, &Labels) - assert.Nil(t, err) - - // the unmarshalled Labels spec should equal the expected spec below - expected := LabelsSpec{ - "mgr": map[string]string{ - "foo": "bar", - "hello": "world", - }, - "mon": nil, - } - assert.Equal(t, expected, Labels) -} diff --git a/pkg/apis/ceph.rook.io/v1/livenessprobe.go b/pkg/apis/ceph.rook.io/v1/livenessprobe.go deleted file mode 100644 index a1839740a..000000000 --- a/pkg/apis/ceph.rook.io/v1/livenessprobe.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" -) - -// GetMonLivenessProbe returns the liveness probe for the MON service -func GetMonLivenessProbe(l CephClusterHealthCheckSpec) *corev1.Probe { - return l.LivenessProbe[ResourcesKeyMon].Probe -} - -// GetMgrLivenessProbe returns the liveness probe for the MGR service -func GetMgrLivenessProbe(l CephClusterHealthCheckSpec) *corev1.Probe { - return l.LivenessProbe[ResourcesKeyMgr].Probe -} - -// GetOSDLivenessProbe returns the liveness probe for the OSD service -func GetOSDLivenessProbe(l CephClusterHealthCheckSpec) *corev1.Probe { - return l.LivenessProbe[ResourcesKeyOSD].Probe -} - -// GetMdsLivenessProbe returns the liveness probe for the MDS service -func GetMdsLivenessProbe(l CephClusterHealthCheckSpec) *corev1.Probe { - return l.LivenessProbe[ResourcesKeyMDS].Probe -} diff --git a/pkg/apis/ceph.rook.io/v1/mirror.go b/pkg/apis/ceph.rook.io/v1/mirror.go deleted file mode 100644 index 205aad78c..000000000 --- a/pkg/apis/ceph.rook.io/v1/mirror.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// HasPeers returns whether the RBD mirror daemon has peer and should connect to it -func (m *MirroringPeerSpec) HasPeers() bool { - return len(m.SecretNames) != 0 -} - -func (m *FSMirroringSpec) SnapShotScheduleEnabled() bool { - return len(m.SnapshotSchedules) != 0 -} diff --git a/pkg/apis/ceph.rook.io/v1/network.go b/pkg/apis/ceph.rook.io/v1/network.go deleted file mode 100644 index 68c0296b3..000000000 --- a/pkg/apis/ceph.rook.io/v1/network.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// IsMultus get whether to use multus network provider -func (n *NetworkSpec) IsMultus() bool { - return n.Provider == "multus" -} - -// IsHost get whether to use host network provider. This method also preserve -// compatibility with the old HostNetwork field. -func (n *NetworkSpec) IsHost() bool { - return (n.HostNetwork && n.Provider == "") || n.Provider == "host" -} diff --git a/pkg/apis/ceph.rook.io/v1/network_test.go b/pkg/apis/ceph.rook.io/v1/network_test.go deleted file mode 100644 index 141fb88da..000000000 --- a/pkg/apis/ceph.rook.io/v1/network_test.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/ghodss/yaml" - "github.com/stretchr/testify/assert" -) - -func TestNetworkCephSpecLegacy(t *testing.T) { - netSpecYAML := []byte(`hostNetwork: true`) - - rawJSON, err := yaml.YAMLToJSON(netSpecYAML) - assert.Nil(t, err) - - var net NetworkSpec - - err = json.Unmarshal(rawJSON, &net) - assert.Nil(t, err) - - expected := NetworkSpec{HostNetwork: true} - - assert.Equal(t, expected, net) -} - -func TestNetworkCephIsHostLegacy(t *testing.T) { - net := NetworkSpec{HostNetwork: true} - - assert.True(t, net.IsHost()) -} - -func TestNetworkSpec(t *testing.T) { - netSpecYAML := []byte(` -provider: host -selectors: - server: enp2s0f0 - broker: enp2s0f0`) - - rawJSON, err := yaml.YAMLToJSON(netSpecYAML) - assert.Nil(t, err) - - var net NetworkSpec - - err = json.Unmarshal(rawJSON, &net) - assert.Nil(t, err) - - expected := NetworkSpec{ - Provider: "host", - Selectors: map[string]string{ - "server": "enp2s0f0", - "broker": "enp2s0f0", - }, - } - - assert.Equal(t, expected, net) -} diff --git a/pkg/apis/ceph.rook.io/v1/object.go b/pkg/apis/ceph.rook.io/v1/object.go deleted file mode 100644 index 51201404d..000000000 --- a/pkg/apis/ceph.rook.io/v1/object.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" -) - -// compile-time assertions ensures CephObjectStore implements webhook.Validator so a webhook builder -// will be registered for the validating webhook. 
-var _ webhook.Validator = &CephObjectStore{} - -const ServiceServingCertKey = "service.beta.openshift.io/serving-cert-secret-name" - -func (s *ObjectStoreSpec) IsMultisite() bool { - return s.Zone.Name != "" -} - -func (s *ObjectStoreSpec) IsTLSEnabled() bool { - return s.Gateway.SecurePort != 0 && (s.Gateway.SSLCertificateRef != "" || s.GetServiceServingCert() != "") -} - -func (s *ObjectStoreSpec) GetPort() (int32, error) { - if s.IsTLSEnabled() { - return s.Gateway.SecurePort, nil - } else if s.Gateway.Port != 0 { - return s.Gateway.Port, nil - } - return -1, errors.New("At least one of Port or SecurePort should be non-zero") -} - -func (s *ObjectStoreSpec) IsExternal() bool { - return len(s.Gateway.ExternalRgwEndpoints) != 0 -} - -func (s *ObjectRealmSpec) IsPullRealm() bool { - return s.Pull.Endpoint != "" -} - -func (o *CephObjectStore) ValidateCreate() error { - logger.Infof("validate create cephobjectstore %v", o) - if err := ValidateObjectSpec(o); err != nil { - return err - } - return nil -} - -// ValidateObjectSpec validate the object store arguments -func ValidateObjectSpec(gs *CephObjectStore) error { - if gs.Name == "" { - return errors.New("missing name") - } - if gs.Namespace == "" { - return errors.New("missing namespace") - } - securePort := gs.Spec.Gateway.SecurePort - if securePort < 0 || securePort > 65535 { - return errors.Errorf("securePort value of %d must be between 0 and 65535", securePort) - } - if gs.Spec.Gateway.Port <= 0 && gs.Spec.Gateway.SecurePort <= 0 { - return errors.New("invalid create: either of port or securePort fields should be not be zero") - } - return nil -} - -func (o *CephObjectStore) ValidateUpdate(old runtime.Object) error { - logger.Info("validate update cephobjectstore") - err := ValidateObjectSpec(o) - if err != nil { - return err - } - return nil -} - -func (o *CephObjectStore) ValidateDelete() error { - return nil -} - -func (s *ObjectStoreSpec) GetServiceServingCert() string { - if s.Gateway.Service != nil { - return s.Gateway.Service.Annotations[ServiceServingCertKey] - } - return "" -} - -func (c *CephObjectStore) GetStatusConditions() *[]Condition { - return &c.Status.Conditions -} diff --git a/pkg/apis/ceph.rook.io/v1/object_test.go b/pkg/apis/ceph.rook.io/v1/object_test.go deleted file mode 100644 index 889fa3b78..000000000 --- a/pkg/apis/ceph.rook.io/v1/object_test.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "testing" - - rook "github.com/rook/rook/pkg/apis/rook.io" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidateObjectStoreSpec(t *testing.T) { - o := &CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-store", - Namespace: "rook-ceph", - }, - Spec: ObjectStoreSpec{ - Gateway: GatewaySpec{ - Port: 1, - SecurePort: 0, - }, - }, - } - err := ValidateObjectSpec(o) - assert.NoError(t, err) - - // when both port and securePort are o - o.Spec.Gateway.Port = 0 - err = ValidateObjectSpec(o) - assert.Error(t, err) - - // when securePort is greater than 65535 - o.Spec.Gateway.SecurePort = 65536 - err = ValidateObjectSpec(o) - assert.Error(t, err) - - // when name is empty - o.ObjectMeta.Name = "" - err = ValidateObjectSpec(o) - assert.Error(t, err) - - // when namespace is empty - o.ObjectMeta.Namespace = "" - err = ValidateObjectSpec(o) - assert.Error(t, err) -} -func TestIsTLSEnabled(t *testing.T) { - objStore := &CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-store", - Namespace: "rook-ceph", - }, - Spec: ObjectStoreSpec{ - Gateway: GatewaySpec{ - Port: 1, - SecurePort: 0, - }, - }, - } - IsTLS := objStore.Spec.IsTLSEnabled() - assert.False(t, IsTLS) - - // only securePort is set without certs - objStore.Spec.Gateway.SecurePort = 443 - IsTLS = objStore.Spec.IsTLSEnabled() - assert.False(t, IsTLS) - - // when SSLCertificateRef is set with securePort - objStore.Spec.Gateway.SSLCertificateRef = "my-tls-cert" - IsTLS = objStore.Spec.IsTLSEnabled() - assert.True(t, IsTLS) - - // when service serving cert is used - objStore.Spec.Gateway.SSLCertificateRef = "" - objStore.Spec.Gateway.Service = &(RGWServiceSpec{Annotations: rook.Annotations{ServiceServingCertKey: "rgw-cert"}}) - IsTLS = objStore.Spec.IsTLSEnabled() - assert.True(t, IsTLS) - - // when cert are set but securePort unset - objStore.Spec.Gateway.SecurePort = 0 - IsTLS = objStore.Spec.IsTLSEnabled() - assert.False(t, IsTLS) -} diff --git a/pkg/apis/ceph.rook.io/v1/placement.go b/pkg/apis/ceph.rook.io/v1/placement.go deleted file mode 100644 index 5bbd74d9e..000000000 --- a/pkg/apis/ceph.rook.io/v1/placement.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package v1 - -import ( - v1 "k8s.io/api/core/v1" -) - -func (p PlacementSpec) All() Placement { - return p[KeyAll] -} - -// ApplyToPodSpec adds placement to a pod spec -func (p Placement) ApplyToPodSpec(t *v1.PodSpec) { - if t.Affinity == nil { - t.Affinity = &v1.Affinity{} - } - if p.NodeAffinity != nil { - t.Affinity.NodeAffinity = p.mergeNodeAffinity(t.Affinity.NodeAffinity) - } - if p.PodAffinity != nil { - t.Affinity.PodAffinity = p.PodAffinity.DeepCopy() - } - if p.PodAntiAffinity != nil { - t.Affinity.PodAntiAffinity = p.PodAntiAffinity.DeepCopy() - } - if p.Tolerations != nil { - t.Tolerations = p.mergeTolerations(t.Tolerations) - } - if p.TopologySpreadConstraints != nil { - t.TopologySpreadConstraints = p.TopologySpreadConstraints - } -} - -func (p Placement) mergeNodeAffinity(nodeAffinity *v1.NodeAffinity) *v1.NodeAffinity { - // no node affinity is specified yet, so return the placement's nodeAffinity - result := p.NodeAffinity.DeepCopy() - if nodeAffinity == nil { - return result - } - - // merge the preferred node affinity that was already specified, and the placement's nodeAffinity - result.PreferredDuringSchedulingIgnoredDuringExecution = append( - nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, - p.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution...) - - // nothing to merge if no affinity was passed in - if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { - return result - } - // take the desired affinity if there was none on the placement - if p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { - result.RequiredDuringSchedulingIgnoredDuringExecution = nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution - return result - } - // take the desired affinity node selectors without the need to merge - if len(nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { - return result - } - // take the placement affinity node selectors without the need to merge - if len(p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { - // take the placement from the first option since the second isn't specified - result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = - nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms - return result - } - - // merge the match expressions together since they are defined in both placements - // this will only work if we want an "and" between all the expressions, more complex conditions won't work with this merge - var nodeTerm v1.NodeSelectorTerm - nodeTerm.MatchExpressions = append( - nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions, - p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions...) - nodeTerm.MatchFields = append( - nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields, - p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields...) - result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0] = nodeTerm - - return result -} - -func (p Placement) mergeTolerations(tolerations []v1.Toleration) []v1.Toleration { - // no toleration is specified yet, return placement's toleration - if tolerations == nil { - return p.Tolerations - } - - return append(p.Tolerations, tolerations...) 
-} - -// Merge returns a Placement which results from merging the attributes of the -// original Placement with the attributes of the supplied one. The supplied -// Placement's attributes will override the original ones if defined. -func (p Placement) Merge(with Placement) Placement { - ret := p - if with.NodeAffinity != nil { - ret.NodeAffinity = with.NodeAffinity - } - if with.PodAffinity != nil { - ret.PodAffinity = with.PodAffinity - } - if with.PodAntiAffinity != nil { - ret.PodAntiAffinity = with.PodAntiAffinity - } - if with.Tolerations != nil { - ret.Tolerations = ret.mergeTolerations(with.Tolerations) - } - if with.TopologySpreadConstraints != nil { - ret.TopologySpreadConstraints = with.TopologySpreadConstraints - } - return ret -} - -// GetMgrPlacement returns the placement for the MGR service -func GetMgrPlacement(p PlacementSpec) Placement { - return p.All().Merge(p[KeyMgr]) -} - -// GetMonPlacement returns the placement for the MON service -func GetMonPlacement(p PlacementSpec) Placement { - return p.All().Merge(p[KeyMon]) -} - -// GetArbiterPlacement returns the placement for the arbiter MON service -func GetArbiterPlacement(p PlacementSpec) Placement { - // If the mon is the arbiter in a stretch cluster and its placement is specified, return it - // without merging with the "all" placement so it can be handled separately from all other daemons - return p[KeyMonArbiter] -} - -// GetOSDPlacement returns the placement for the OSD service -func GetOSDPlacement(p PlacementSpec) Placement { - return p.All().Merge(p[KeyOSD]) -} diff --git a/pkg/apis/ceph.rook.io/v1/placement_test.go b/pkg/apis/ceph.rook.io/v1/placement_test.go deleted file mode 100644 index 092e45eeb..000000000 --- a/pkg/apis/ceph.rook.io/v1/placement_test.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/ghodss/yaml" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestPlacementSpec(t *testing.T) { - specYaml := []byte(` -nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: foo - operator: In - values: - - bar -tolerations: - - key: foo - operator: Exists -topologySpreadConstraints: - - maxSkew: 1 - topologyKey: zone - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - foo: bar`) - // convert the raw spec yaml into JSON - rawJSON, err := yaml.YAMLToJSON(specYaml) - assert.Nil(t, err) - - // unmarshal the JSON into a strongly typed placement spec object - var placement Placement - err = json.Unmarshal(rawJSON, &placement) - assert.Nil(t, err) - - // the unmarshalled placement spec should equal the expected spec below - expected := Placement{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: v1.NodeSelectorOpIn, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - }, - Tolerations: []v1.Toleration{ - { - Key: "foo", - Operator: v1.TolerationOpExists, - }, - }, - TopologySpreadConstraints: []v1.TopologySpreadConstraint{ - { - MaxSkew: 1, - TopologyKey: "zone", - WhenUnsatisfiable: "DoNotSchedule", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"foo": "bar"}, - }, - }, - }, - } - assert.Equal(t, expected, placement) -} - -func TestMergeNodeAffinity(t *testing.T) { - // affinity is nil - p := Placement{} - result := p.mergeNodeAffinity(nil) - assert.Nil(t, result) - - // node affinity is only set on the placement and should remain unchanged - p.NodeAffinity = placementTestGenerateNodeAffinity() - result = p.mergeNodeAffinity(nil) - assert.Equal(t, p.NodeAffinity, result) - - // preferred set, but required not set - affinityToMerge := placementTestGenerateNodeAffinity() - affinityToMerge.RequiredDuringSchedulingIgnoredDuringExecution = nil - result = p.mergeNodeAffinity(affinityToMerge) - assert.Equal(t, 2, len(result.PreferredDuringSchedulingIgnoredDuringExecution)) - assert.Equal(t, p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, result.RequiredDuringSchedulingIgnoredDuringExecution) - - // preferred and required expressions set - affinityToMerge = placementTestGenerateNodeAffinity() - affinityToMerge.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key = "baz" - result = p.mergeNodeAffinity(affinityToMerge) - assert.Equal(t, 2, len(result.PreferredDuringSchedulingIgnoredDuringExecution)) - assert.Equal(t, 2, len(result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - assert.Equal(t, "baz", result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key) - assert.Equal(t, "foo", result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[1].Key) -} - -func TestPlacementApplyToPodSpec(t *testing.T) { - to := placementTestGetTolerations("foo", "bar") - na := placementTestGenerateNodeAffinity() - antiaffinity := placementAntiAffinity("v1") - tc := placementTestGetTopologySpreadConstraints("zone") - expected := &v1.PodSpec{ - Affinity: &v1.Affinity{NodeAffinity: na, PodAntiAffinity: antiaffinity}, - Tolerations: 
to, - TopologySpreadConstraints: tc, - } - - var p Placement - var ps *v1.PodSpec - - p = Placement{ - NodeAffinity: na, - Tolerations: to, - PodAntiAffinity: antiaffinity, - TopologySpreadConstraints: tc, - } - ps = &v1.PodSpec{} - p.ApplyToPodSpec(ps) - assert.Equal(t, expected, ps) - assert.Equal(t, 1, len(ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution)) - - // Appending some other antiaffinity to the pod spec should not alter the original placement antiaffinity - otherAntiAffinity := placementAntiAffinity("v2") - ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append( - ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, - otherAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution...) - assert.Equal(t, 1, len(antiaffinity.PreferredDuringSchedulingIgnoredDuringExecution)) - - // partial update - p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} - ps = &v1.PodSpec{Tolerations: to, TopologySpreadConstraints: tc} - p.ApplyToPodSpec(ps) - assert.Equal(t, expected, ps) - - // overridden attributes - p = Placement{ - NodeAffinity: na, - PodAntiAffinity: antiaffinity, - Tolerations: to, - TopologySpreadConstraints: tc, - } - ps = &v1.PodSpec{ - TopologySpreadConstraints: placementTestGetTopologySpreadConstraints("rack"), - } - p.ApplyToPodSpec(ps) - assert.Equal(t, expected, ps) - - // The preferred affinity is merged from both sources to result in two node affinities - p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} - nap := placementTestGenerateNodeAffinity() - nap.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 5 - ps = &v1.PodSpec{ - Affinity: &v1.Affinity{NodeAffinity: nap}, - Tolerations: to, - TopologySpreadConstraints: tc, - } - p.ApplyToPodSpec(ps) - assert.Equal(t, 2, len(ps.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution)) - - p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} - to = placementTestGetTolerations("foo", "bar") - ps = &v1.PodSpec{ - Tolerations: to, - } - p.ApplyToPodSpec(ps) - assert.Equal(t, 1, len(ps.Tolerations)) - p = Placement{Tolerations: to, NodeAffinity: na, PodAntiAffinity: antiaffinity} - p.ApplyToPodSpec(ps) - assert.Equal(t, 2, len(ps.Tolerations)) -} - -func TestPlacementMerge(t *testing.T) { - to := placementTestGetTolerations("foo", "bar") - na := placementTestGenerateNodeAffinity() - tc := placementTestGetTopologySpreadConstraints("zone") - - var original, with, expected, merged Placement - - original = Placement{} - with = Placement{Tolerations: to} - expected = Placement{Tolerations: to} - merged = original.Merge(with) - assert.Equal(t, expected, merged) - - original = Placement{NodeAffinity: na} - with = Placement{Tolerations: to} - expected = Placement{NodeAffinity: na, Tolerations: to} - merged = original.Merge(with) - assert.Equal(t, expected, merged) - - original = Placement{} - with = Placement{TopologySpreadConstraints: tc} - expected = Placement{TopologySpreadConstraints: tc} - merged = original.Merge(with) - assert.Equal(t, expected, merged) - - original = Placement{ - Tolerations: placementTestGetTolerations("bar", "baz"), - TopologySpreadConstraints: placementTestGetTopologySpreadConstraints("rack"), - } - with = Placement{ - NodeAffinity: na, - Tolerations: to, - TopologySpreadConstraints: tc, - } - var ts int64 = 10 - expected = Placement{ - NodeAffinity: na, - Tolerations: []v1.Toleration{ - { - Key: "bar", - Operator: v1.TolerationOpExists, - Value: "baz", - Effect: 
v1.TaintEffectNoSchedule, - TolerationSeconds: &ts, - }, - { - Key: "foo", - Operator: v1.TolerationOpExists, - Value: "bar", - Effect: v1.TaintEffectNoSchedule, - TolerationSeconds: &ts, - }, - }, - TopologySpreadConstraints: tc, - } - merged = original.Merge(with) - assert.Equal(t, expected, merged) -} - -func placementTestGetTolerations(key, value string) []v1.Toleration { - var ts int64 = 10 - return []v1.Toleration{ - { - Key: key, - Operator: v1.TolerationOpExists, - Value: value, - Effect: v1.TaintEffectNoSchedule, - TolerationSeconds: &ts, - }, - } -} - -func placementTestGetTopologySpreadConstraints(topologyKey string) []v1.TopologySpreadConstraint { - return []v1.TopologySpreadConstraint{ - { - MaxSkew: 1, - TopologyKey: topologyKey, - WhenUnsatisfiable: "DoNotScheudule", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"foo": "bar"}, - }, - }, - } -} - -func placementAntiAffinity(value string) *v1.PodAntiAffinity { - return &v1.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ - { - Weight: 50, - PodAffinityTerm: v1.PodAffinityTerm{ - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": value, - }, - }, - TopologyKey: v1.LabelHostname, - }, - }, - }, - } -} - -func placementTestGenerateNodeAffinity() *v1.NodeAffinity { - return &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: v1.NodeSelectorOpExists, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ - { - Weight: 10, - Preference: v1.NodeSelectorTerm{ - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: v1.NodeSelectorOpExists, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - } -} - -func TestMergeToleration(t *testing.T) { - // placement is nil - p := Placement{} - result := p.mergeTolerations(nil) - assert.Nil(t, result) - - placementToleration := []v1.Toleration{ - { - Key: "foo", - Operator: v1.TolerationOpEqual, - }, - } - - p.Tolerations = placementToleration - result = p.mergeTolerations(nil) - assert.Equal(t, p.Tolerations, result) - - newToleration := []v1.Toleration{ - { - Key: "new", - Operator: v1.TolerationOpExists, - }, - } - - result = p.mergeTolerations(newToleration) - assert.Equal(t, 2, len(result)) - assert.Equal(t, placementToleration[0].Key, result[0].Key) - assert.Equal(t, newToleration[0].Key, result[1].Key) -} diff --git a/pkg/apis/ceph.rook.io/v1/pool.go b/pkg/apis/ceph.rook.io/v1/pool.go deleted file mode 100644 index 23ee14993..000000000 --- a/pkg/apis/ceph.rook.io/v1/pool.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" -) - -var ( - webhookName = "rook-ceph-webhook" - logger = capnslog.NewPackageLogger("github.com/rook/rook", webhookName) -) - -var _ webhook.Validator = &CephBlockPool{} - -func (p *PoolSpec) IsReplicated() bool { - return p.Replicated.Size > 0 -} - -func (p *PoolSpec) IsErasureCoded() bool { - return p.ErasureCoded.CodingChunks > 0 || p.ErasureCoded.DataChunks > 0 -} - -func (p *PoolSpec) IsHybridStoragePool() bool { - return p.Replicated.HybridStorage != nil -} - -func (p *PoolSpec) IsCompressionEnabled() bool { - return p.CompressionMode != "" -} - -func (p *ReplicatedSpec) IsTargetRatioEnabled() bool { - return p.TargetSizeRatio != 0 -} - -func (p *CephBlockPool) ValidateCreate() error { - logger.Infof("validate create cephblockpool %v", p) - - err := validatePoolSpec(p.Spec) - if err != nil { - return err - } - return nil -} - -func validatePoolSpec(ps PoolSpec) error { - // Checks if either ErasureCoded or Replicated fields are set - if ps.ErasureCoded.CodingChunks <= 0 && ps.ErasureCoded.DataChunks <= 0 && ps.Replicated.TargetSizeRatio <= 0 && ps.Replicated.Size <= 0 { - return errors.New("invalid create: either of erasurecoded or replicated fields should be set") - } - // Check if any of the ErasureCoded fields are populated. Then check if replicated is populated. Both can't be populated at same time. - if ps.ErasureCoded.CodingChunks > 0 || ps.ErasureCoded.DataChunks > 0 || ps.ErasureCoded.Algorithm != "" { - if ps.Replicated.Size > 0 || ps.Replicated.TargetSizeRatio > 0 { - return errors.New("invalid create: both erasurecoded and replicated fields cannot be set at the same time") - } - } - - if ps.Replicated.Size == 0 && ps.Replicated.TargetSizeRatio == 0 { - // Check if datachunks is set and has value less than 2. - if ps.ErasureCoded.DataChunks < 2 && ps.ErasureCoded.DataChunks != 0 { - return errors.New("invalid create: erasurecoded.datachunks needs minimum value of 2") - } - - // Check if codingchunks is set and has value less than 1. - if ps.ErasureCoded.CodingChunks < 1 && ps.ErasureCoded.CodingChunks != 0 { - return errors.New("invalid create: erasurecoded.codingchunks needs minimum value of 1") - } - } - return nil -} - -func (p *CephBlockPool) ValidateUpdate(old runtime.Object) error { - logger.Info("validate update cephblockpool") - ocbp := old.(*CephBlockPool) - err := validatePoolSpec(p.Spec) - if err != nil { - return err - } - if p.Spec.ErasureCoded.CodingChunks > 0 || p.Spec.ErasureCoded.DataChunks > 0 || p.Spec.ErasureCoded.Algorithm != "" { - if ocbp.Spec.Replicated.Size > 0 || ocbp.Spec.Replicated.TargetSizeRatio > 0 { - return errors.New("invalid update: replicated field is set already in previous object. cannot be changed to use erasurecoded") - } - } - - if p.Spec.Replicated.Size > 0 || p.Spec.Replicated.TargetSizeRatio > 0 { - if ocbp.Spec.ErasureCoded.CodingChunks > 0 || ocbp.Spec.ErasureCoded.DataChunks > 0 || ocbp.Spec.ErasureCoded.Algorithm != "" { - return errors.New("invalid update: erasurecoded field is set already in previous object. 
cannot be changed to use replicated") - } - } - return nil -} - -func (p *CephBlockPool) ValidateDelete() error { - return nil -} - -// SnapshotSchedulesEnabled returns whether snapshot schedules are desired -func (p *MirroringSpec) SnapshotSchedulesEnabled() bool { - return len(p.SnapshotSchedules) > 0 -} diff --git a/pkg/apis/ceph.rook.io/v1/pool_test.go b/pkg/apis/ceph.rook.io/v1/pool_test.go deleted file mode 100644 index 8e8bb5d42..000000000 --- a/pkg/apis/ceph.rook.io/v1/pool_test.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidatePoolSpec(t *testing.T) { - p := &CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ec-pool", - }, - Spec: PoolSpec{ - ErasureCoded: ErasureCodedSpec{ - CodingChunks: 1, - DataChunks: 2, - }, - }, - } - err := validatePoolSpec(p.Spec) - assert.NoError(t, err) - - p.Spec.ErasureCoded.DataChunks = 1 - err = validatePoolSpec(p.Spec) - assert.Error(t, err) -} - -func TestCephBlockPoolValidateUpdate(t *testing.T) { - p := &CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ec-pool", - }, - Spec: PoolSpec{ - Replicated: ReplicatedSpec{RequireSafeReplicaSize: true, Size: 3}, - }, - } - up := p.DeepCopy() - up.Spec.ErasureCoded.DataChunks = 2 - up.Spec.ErasureCoded.CodingChunks = 1 - err := up.ValidateUpdate(p) - assert.Error(t, err) -} - -func TestMirroringSpec_SnapshotSchedulesEnabled(t *testing.T) { - type fields struct { - Enabled bool - Mode string - SnapshotSchedules []SnapshotScheduleSpec - } - tests := []struct { - name string - fields fields - want bool - }{ - {"disabled", fields{Enabled: true, Mode: "pool", SnapshotSchedules: []SnapshotScheduleSpec{}}, false}, - {"enabled", fields{Enabled: true, Mode: "pool", SnapshotSchedules: []SnapshotScheduleSpec{{Interval: "2d"}}}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := &MirroringSpec{ - Enabled: tt.fields.Enabled, - Mode: tt.fields.Mode, - SnapshotSchedules: tt.fields.SnapshotSchedules, - } - if got := p.SnapshotSchedulesEnabled(); got != tt.want { - t.Errorf("MirroringSpec.SnapshotSchedulesEnabled() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/apis/ceph.rook.io/v1/priorityclasses.go b/pkg/apis/ceph.rook.io/v1/priorityclasses.go deleted file mode 100644 index d60ed1acd..000000000 --- a/pkg/apis/ceph.rook.io/v1/priorityclasses.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// All returns the priority class name defined for 'all' daemons in the Ceph cluster CRD. -func (p PriorityClassNamesSpec) All() string { - if val, ok := p[KeyAll]; ok { - return val - } - return "" -} - -// GetMgrPriorityClassName returns the priority class name for the MGR service -func GetMgrPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyMgr]; !ok { - return p.All() - } - return p[KeyMgr] -} - -// GetMonPriorityClassName returns the priority class name for the monitors -func GetMonPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyMon]; !ok { - return p.All() - } - return p[KeyMon] -} - -// GetOSDPriorityClassName returns the priority class name for the OSDs -func GetOSDPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyOSD]; !ok { - return p.All() - } - return p[KeyOSD] -} - -// GetCleanupPriorityClassName returns the priority class name for the cleanup job -func GetCleanupPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyCleanup]; !ok { - return p.All() - } - return p[KeyCleanup] -} diff --git a/pkg/apis/ceph.rook.io/v1/priorityclasses_test.go b/pkg/apis/ceph.rook.io/v1/priorityclasses_test.go deleted file mode 100644 index 13b50b421..000000000 --- a/pkg/apis/ceph.rook.io/v1/priorityclasses_test.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/ghodss/yaml" - "github.com/stretchr/testify/assert" -) - -func TestPriorityClassNamesSpec(t *testing.T) { - specYaml := []byte(` -all: all-class -mgr: mgr-class -mon: mon-class -osd: osd-class -`) - - // convert the raw spec yaml into JSON - rawJSON, err := yaml.YAMLToJSON(specYaml) - assert.Nil(t, err) - - // unmarshal the JSON into a strongly typed annotations spec object - var priorityClassNames PriorityClassNamesSpec - err = json.Unmarshal(rawJSON, &priorityClassNames) - assert.Nil(t, err) - - // the unmarshalled priority class names spec should equal the expected spec below - expected := PriorityClassNamesSpec{ - "all": "all-class", - "mgr": "mgr-class", - "mon": "mon-class", - "osd": "osd-class", - } - assert.Equal(t, expected, priorityClassNames) -} - -func TestPriorityClassNamesDefaultToAll(t *testing.T) { - priorityClassNames := PriorityClassNamesSpec{ - "all": "all-class", - "mon": "mon-class", - } - - assert.Equal(t, "all-class", priorityClassNames.All()) -} diff --git a/pkg/apis/ceph.rook.io/v1/register.go b/pkg/apis/ceph.rook.io/v1/register.go deleted file mode 100644 index d1d0e4940..000000000 --- a/pkg/apis/ceph.rook.io/v1/register.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - cephrookio "github.com/rook/rook/pkg/apis/ceph.rook.io" -) - -const ( - CustomResourceGroup = "ceph.rook.io" - Version = "v1" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: cephrookio.CustomResourceGroupName, Version: Version} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CephClient{}, - &CephClientList{}, - &CephCluster{}, - &CephClusterList{}, - &CephBlockPool{}, - &CephBlockPoolList{}, - &CephFilesystem{}, - &CephFilesystemList{}, - &CephNFS{}, - &CephNFSList{}, - &CephObjectStore{}, - &CephObjectStoreList{}, - &CephObjectStoreUser{}, - &CephObjectStoreUserList{}, - &CephObjectRealm{}, - &CephObjectRealmList{}, - &CephObjectZoneGroup{}, - &CephObjectZoneGroupList{}, - &CephObjectZone{}, - &CephObjectZoneList{}, - &CephRBDMirror{}, - &CephRBDMirrorList{}, - &CephFilesystemMirror{}, - &CephFilesystemMirrorList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/pkg/apis/ceph.rook.io/v1/resources.go b/pkg/apis/ceph.rook.io/v1/resources.go deleted file mode 100644 index 32f95df3b..000000000 --- a/pkg/apis/ceph.rook.io/v1/resources.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" -) - -const ( - // ResourcesKeyMon represents the name of resource in the CR for a mon - ResourcesKeyMon = "mon" - // ResourcesKeyMgr represents the name of resource in the CR for a mgr - ResourcesKeyMgr = "mgr" - // ResourcesKeyMgrSidecar represents the name of resource in the CR for a mgr - ResourcesKeyMgrSidecar = "mgr-sidecar" - // ResourcesKeyOSD represents the name of a resource in the CR for all OSDs - ResourcesKeyOSD = "osd" - // ResourcesKeyPrepareOSD represents the name of resource in the CR for the osd prepare job - ResourcesKeyPrepareOSD = "prepareosd" - // ResourcesKeyMDS represents the name of resource in the CR for the mds - ResourcesKeyMDS = "mds" - // ResourcesKeyCrashCollector represents the name of resource in the CR for the crash - ResourcesKeyCrashCollector = "crashcollector" - // ResourcesKeyLogCollector represents the name of resource in the CR for the log - ResourcesKeyLogCollector = "logcollector" - // ResourcesKeyRBDMirror represents the name of resource in the CR for the rbd mirror - ResourcesKeyRBDMirror = "rbdmirror" - // ResourcesKeyFilesystemMirror represents the name of resource in the CR for the filesystem mirror - ResourcesKeyFilesystemMirror = "fsmirror" - // ResourcesKeyCleanup represents the name of resource in the CR for the cleanup - ResourcesKeyCleanup = "cleanup" -) - -// GetMgrResources returns the placement for the MGR service -func GetMgrResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyMgr] -} - -// GetMgrSidecarResources returns the placement for the MGR sidecar container -func GetMgrSidecarResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyMgrSidecar] -} - -// GetMonResources returns the placement for the monitors -func GetMonResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyMon] -} - -// GetOSDResources returns the placement for all OSDs or for OSDs of specified device class (hdd, nvme, ssd) -func 
GetOSDResources(p ResourceSpec, deviceClass string) v1.ResourceRequirements {
-	if deviceClass == "" {
-		return p[ResourcesKeyOSD]
-	}
-	// if a device class is specified but has no resource requirements of its own, fall back to the common OSD requirements
-	r, ok := p[getOSDResourceKeyForDeviceClass(deviceClass)]
-	if ok {
-		return r
-	}
-	return p[ResourcesKeyOSD]
-}
-
-// getOSDResourceKeyForDeviceClass returns the key name for a device class in the resources spec
-func getOSDResourceKeyForDeviceClass(deviceClass string) string {
-	return ResourcesKeyOSD + "-" + deviceClass
-}
-
-// GetPrepareOSDResources returns the resources for the OSD prepare job
-func GetPrepareOSDResources(p ResourceSpec) v1.ResourceRequirements {
-	return p[ResourcesKeyPrepareOSD]
-}
-
-// GetCrashCollectorResources returns the resources for the crash collector daemon
-func GetCrashCollectorResources(p ResourceSpec) v1.ResourceRequirements {
-	return p[ResourcesKeyCrashCollector]
-}
-
-// GetLogCollectorResources returns the resources for the log collector daemon
-func GetLogCollectorResources(p ResourceSpec) v1.ResourceRequirements {
-	return p[ResourcesKeyLogCollector]
-}
-
-// GetCleanupResources returns the resources for the cleanup job
-func GetCleanupResources(p ResourceSpec) v1.ResourceRequirements {
-	return p[ResourcesKeyCleanup]
-}
diff --git a/pkg/apis/ceph.rook.io/v1/security.go b/pkg/apis/ceph.rook.io/v1/security.go
deleted file mode 100644
index 013fd6bcb..000000000
--- a/pkg/apis/ceph.rook.io/v1/security.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
-Copyright 2020 The Rook Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	"strings"
-
-	"github.com/hashicorp/vault/api"
-)
-
-var (
-	VaultTLSConnectionDetails = []string{api.EnvVaultCACert, api.EnvVaultClientCert, api.EnvVaultClientKey}
-)
-
-// IsEnabled returns whether a KMS is configured
-func (kms *KeyManagementServiceSpec) IsEnabled() bool {
-	return len(kms.ConnectionDetails) != 0
-}
-
-// IsTokenAuthEnabled returns whether KMS token auth is enabled
-func (kms *KeyManagementServiceSpec) IsTokenAuthEnabled() bool {
-	return kms.TokenSecretName != ""
-}
-
-// IsTLSEnabled returns whether KMS TLS details are configured
-func (kms *KeyManagementServiceSpec) IsTLSEnabled() bool {
-	for _, tlsOption := range VaultTLSConnectionDetails {
-		tlsSecretName := getParam(kms.ConnectionDetails, tlsOption)
-		if tlsSecretName != "" {
-			return true
-		}
-	}
-	return false
-}
-
-// getParam returns the value of the KMS config option
-func getParam(kmsConfig map[string]string, param string) string {
-	if val, ok := kmsConfig[param]; ok && val != "" {
-		return strings.TrimSpace(val)
-	}
-	return ""
-}
diff --git a/pkg/apis/ceph.rook.io/v1/spec_test.go b/pkg/apis/ceph.rook.io/v1/spec_test.go
deleted file mode 100644
index adab26e08..000000000
--- a/pkg/apis/ceph.rook.io/v1/spec_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Copyright 2018 The Rook Authors. All rights reserved.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1 - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/ghodss/yaml" - "github.com/stretchr/testify/assert" -) - -func TestClusterSpecMarshal(t *testing.T) { - specYaml := []byte(` -dataDirHostPath: /var/lib/rook -mon: - count: 5 - allowMultiplePerNode: false -network: - hostNetwork: true -storage: - useAllNodes: false - useAllDevices: false - deviceFilter: "^sd." - devicePathFilter: "^/dev/disk/by-path/pci-.*" - location: "region=us-west,datacenter=delmar" - config: - metadataDevice: "nvme01" - journalSizeMB: "1024" - databaseSizeMB: "1024" - nodes: - - name: "node2" - deviceFilter: "^foo*" - devicePathFilter: "^/dev/disk/by-id/.*foo.*"`) - - // convert the raw spec yaml into JSON - rawJSON, err := yaml.YAMLToJSON(specYaml) - assert.Nil(t, err) - fmt.Printf("rawJSON: %s\n", string(rawJSON)) - - // unmarshal the JSON into a strongly typed storage spec object - var clusterSpec ClusterSpec - err = json.Unmarshal(rawJSON, &clusterSpec) - assert.Nil(t, err) - - // the unmarshalled storage spec should equal the expected spec below - useAllDevices := false - expectedSpec := ClusterSpec{ - Mon: MonSpec{ - Count: 5, - AllowMultiplePerNode: false, - }, - DataDirHostPath: "/var/lib/rook", - Network: NetworkSpec{ - HostNetwork: true, - }, - Storage: StorageScopeSpec{ - UseAllNodes: false, - Selection: Selection{ - UseAllDevices: &useAllDevices, - DeviceFilter: "^sd.", - DevicePathFilter: "^/dev/disk/by-path/pci-.*", - }, - Config: map[string]string{ - "metadataDevice": "nvme01", - "journalSizeMB": "1024", - "databaseSizeMB": "1024", - }, - Nodes: []Node{ - { - Name: "node2", - Selection: Selection{ - DeviceFilter: "^foo*", - DevicePathFilter: "^/dev/disk/by-id/.*foo.*", - }, - }, - }, - }, - } - - assert.Equal(t, expectedSpec, clusterSpec) -} diff --git a/pkg/apis/ceph.rook.io/v1/status.go b/pkg/apis/ceph.rook.io/v1/status.go deleted file mode 100644 index 570ce0fbf..000000000 --- a/pkg/apis/ceph.rook.io/v1/status.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Based on code from https://github.com/kubernetes/apimachinery/blob/master/pkg/api/meta/conditions.go - -// A StatusConditionGetter allows getting a pointer to an object's conditions. 
-type StatusConditionGetter interface { - client.Object - - // GetStatusConditions returns a pointer to the object's conditions compatible with - // SetStatusCondition and FindStatusCondition. - GetStatusConditions() *[]Condition -} - -// SetStatusCondition sets the corresponding condition in conditions to newCondition. -// conditions must be non-nil. -// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to -// newCondition, LastTransitionTime is set to now if the new status differs from the old status) -// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]Condition, newCondition Condition) { - if conditions == nil { - return - } - - now := metav1.NewTime(time.Now()) - - existingCondition := FindStatusCondition(*conditions, newCondition.Type) - if existingCondition == nil { - if newCondition.LastTransitionTime.IsZero() { - newCondition.LastTransitionTime = now - newCondition.LastHeartbeatTime = now - } - *conditions = append(*conditions, newCondition) - return - } - - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - if !newCondition.LastTransitionTime.IsZero() { - existingCondition.LastTransitionTime = newCondition.LastTransitionTime - } else { - existingCondition.LastTransitionTime = now - } - } - - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - if !newCondition.LastHeartbeatTime.IsZero() { - existingCondition.LastHeartbeatTime = newCondition.LastHeartbeatTime - } else { - existingCondition.LastHeartbeatTime = now - } -} - -// FindStatusCondition finds the conditionType in conditions. -func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { - for i := range conditions { - if conditions[i].Type == conditionType { - return &conditions[i] - } - } - - return nil -} diff --git a/pkg/apis/ceph.rook.io/v1/status_test.go b/pkg/apis/ceph.rook.io/v1/status_test.go deleted file mode 100644 index cdf9a6223..000000000 --- a/pkg/apis/ceph.rook.io/v1/status_test.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "reflect" - "testing" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Based on code from https://github.com/kubernetes/apimachinery/blob/master/pkg/api/meta/conditions.go - -func TestSetStatusCondition(t *testing.T) { - oneHourBefore := metav1.Time{Time: time.Now().Add(-1 * time.Hour)} - oneHourAfter := metav1.Time{Time: time.Now().Add(1 * time.Hour)} - - tests := []struct { - name string - conditions []Condition - toAdd Condition - expected []Condition - }{ - { - name: "should-add", - conditions: []Condition{ - {Type: "first"}, - {Type: "third"}, - }, - toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - expected: []Condition{ - {Type: "first"}, - {Type: "third"}, - {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - }, - }, - { - name: "use-supplied-transition-time", - conditions: []Condition{ - {Type: "first"}, - {Type: "second", Status: v1.ConditionFalse}, - {Type: "third"}, - }, - toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - expected: []Condition{ - {Type: "first"}, - {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - {Type: "third"}, - }, - }, - { - name: "update-fields", - conditions: []Condition{ - {Type: "first"}, - {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore}, - {Type: "third"}, - }, - toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourAfter, LastHeartbeatTime: oneHourAfter, Reason: "reason", Message: "message"}, - expected: []Condition{ - {Type: "first"}, - {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourAfter, Reason: "reason", Message: "message"}, - {Type: "third"}, - }, - }, - { - name: "empty-conditions", - conditions: []Condition{}, - toAdd: Condition{Type: "first", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - expected: []Condition{ - {Type: "first", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - SetStatusCondition(&test.conditions, test.toAdd) - if !reflect.DeepEqual(test.conditions, test.expected) { - t.Error(test.conditions) - } - }) - } -} - -func TestFindStatusCondition(t *testing.T) { - tests := []struct { - name string - conditions []Condition - conditionType string - expected *Condition - }{ - { - name: "not-present", - conditions: []Condition{ - {Type: "first"}, - }, - conditionType: "second", - expected: nil, - }, - { - name: "present", - conditions: []Condition{ - {Type: "first"}, - {Type: "second"}, - }, - conditionType: "second", - expected: &Condition{Type: "second"}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual := FindStatusCondition(test.conditions, ConditionType(test.conditionType)) - if !reflect.DeepEqual(actual, test.expected) { - t.Error(actual) - } - }) - } -} diff --git 
a/pkg/apis/ceph.rook.io/v1/storage.go b/pkg/apis/ceph.rook.io/v1/storage.go deleted file mode 100644 index 1f5cd9c71..000000000 --- a/pkg/apis/ceph.rook.io/v1/storage.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1 - -// AnyUseAllDevices gets whether to use all devices -func (s *StorageScopeSpec) AnyUseAllDevices() bool { - if s.Selection.GetUseAllDevices() { - return true - } - - for _, n := range s.Nodes { - if n.Selection.GetUseAllDevices() { - return true - } - } - - return false -} - -// ClearUseAllDevices clears all devices -func (s *StorageScopeSpec) ClearUseAllDevices() { - clear := false - s.Selection.UseAllDevices = &clear - for i := range s.Nodes { - s.Nodes[i].Selection.UseAllDevices = &clear - } -} - -// NodeExists returns true if the node exists in the storage spec. False otherwise. -func (s *StorageScopeSpec) NodeExists(nodeName string) bool { - for i := range s.Nodes { - if s.Nodes[i].Name == nodeName { - return true - } - } - return false -} - -// Fully resolves the config of the given node name, taking into account cluster level and node level specified config. -// In general, the more fine grained the configuration is specified, the more precedence it takes. Fully resolved -// configuration for the node has the following order of precedence. -// 1) Node (config defined on the node itself) -// 2) Cluster (config defined on the cluster) -// 3) Default values (if no config exists for the node or cluster) -func (s *StorageScopeSpec) ResolveNode(nodeName string) *Node { - // find the requested storage node first, if it exists - var node *Node - for i := range s.Nodes { - if s.Nodes[i].Name == nodeName { - node = &(s.Nodes[i]) - break - } - } - - if node == nil { - // a node with the given name was not found - return nil - } - if node.Config == nil { - node.Config = map[string]string{} - } - - // now resolve all properties that haven't already been set on the node - s.resolveNodeSelection(node) - s.resolveNodeConfig(node) - - return node -} - -func (s *StorageScopeSpec) resolveNodeSelection(node *Node) { - if node.Selection.UseAllDevices == nil { - if s.Selection.UseAllDevices != nil { - // the node does not have a value specified for use all devices, but the cluster does. Use the cluster's. - node.Selection.UseAllDevices = s.Selection.UseAllDevices - } else { - // neither node nor cluster have a value set for use all devices, use the default value. 
- node.Selection.UseAllDevices = newBool(false) - } - } - - resolveString(&(node.Selection.DeviceFilter), s.Selection.DeviceFilter, "") - resolveString(&(node.Selection.DevicePathFilter), s.Selection.DevicePathFilter, "") - - if len(node.Selection.Devices) == 0 { - node.Selection.Devices = s.Devices - } - - if len(node.Selection.VolumeClaimTemplates) == 0 { - node.Selection.VolumeClaimTemplates = s.VolumeClaimTemplates - } -} - -func (s *StorageScopeSpec) resolveNodeConfig(node *Node) { - // check for any keys the parent scope has that the node does not - for scopeKey, scopeVal := range s.Config { - if _, ok := node.Config[scopeKey]; !ok { - // the node's config does not have an entry that the parent scope does, add the parent's - // value for that key to the node's config. - node.Config[scopeKey] = scopeVal - } - } -} - -// NodeWithNameExists returns true if the storage spec defines a node with the given name. -func (s *StorageScopeSpec) NodeWithNameExists(name string) bool { - for _, n := range s.Nodes { - if name == n.Name { - return true - } - } - return false -} - -// GetUseAllDevices return if all devices should be used. -func (s *Selection) GetUseAllDevices() bool { - return s.UseAllDevices != nil && *(s.UseAllDevices) -} - -func resolveString(setting *string, parent, defaultVal string) { - if *setting == "" { - if parent != "" { - *setting = parent - } else { - *setting = defaultVal - } - } -} - -func newBool(val bool) *bool { - return &val -} - -// NodesByName implements an interface to sort nodes by name -type NodesByName []Node - -func (s NodesByName) Len() int { - return len(s) -} - -func (s NodesByName) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s NodesByName) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -// IsOnPVCEncrypted returns whether a Ceph Cluster on PVC will be encrypted -func (s *StorageScopeSpec) IsOnPVCEncrypted() bool { - for _, storageClassDeviceSet := range s.StorageClassDeviceSets { - if storageClassDeviceSet.Encrypted { - return true - } - } - - return false -} diff --git a/pkg/apis/ceph.rook.io/v1/storage_test.go b/pkg/apis/ceph.rook.io/v1/storage_test.go deleted file mode 100644 index 561fc96fb..000000000 --- a/pkg/apis/ceph.rook.io/v1/storage_test.go +++ /dev/null @@ -1,289 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNodeExists(t *testing.T) { - t.Run("does not exist - no nodes specified", func(t *testing.T) { - spec := StorageScopeSpec{} - assert.False(t, spec.NodeExists("does-not-exist")) - }) - - t.Run("exists - single node specified", func(t *testing.T) { - spec := StorageScopeSpec{ - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - }, - } - assert.True(t, spec.NodeExists("node1")) - }) - - t.Run("exists and not exists - multiple nodes specified", func(t *testing.T) { - spec := StorageScopeSpec{ - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - {Name: "node3"}, - {Name: "node4"}, - }, - } - assert.True(t, spec.NodeExists("node1")) - assert.False(t, spec.NodeExists("node2")) - assert.True(t, spec.NodeExists("node3")) - assert.True(t, spec.NodeExists("node4")) - assert.False(t, spec.NodeExists("node5")) - assert.False(t, spec.NodeExists("does-not-exist")) - }) -} - -func TestResolveNodeNotExist(t *testing.T) { - // a non existing node should return nil - storageSpec := StorageScopeSpec{} - node := storageSpec.ResolveNode("fake node") - assert.Nil(t, node) -} - -func TestResolveNodeDefaultValues(t *testing.T) { - // a node with no properties and none defined in the cluster storage spec should get the default values - storageSpec := StorageScopeSpec{ - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - }, - } - - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, "", node.Selection.DeviceFilter) - assert.Equal(t, "", node.Selection.DevicePathFilter) - assert.False(t, node.Selection.GetUseAllDevices()) - assert.Equal(t, storageSpec.Devices, node.Devices) -} - -func TestResolveNodeInherentFromCluster(t *testing.T) { - // a node with no properties defined should inherit them from the cluster storage spec - storageSpec := StorageScopeSpec{ - Selection: Selection{ - DeviceFilter: "^sd.", - DevicePathFilter: "^/dev/disk/by-path/pci-.*", - Devices: []Device{{Name: "sda"}}, - }, - Config: map[string]string{ - "foo": "bar", - }, - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - }, - } - - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, "^sd.", node.Selection.DeviceFilter) - assert.Equal(t, "^/dev/disk/by-path/pci-.*", node.Selection.DevicePathFilter) - assert.False(t, node.Selection.GetUseAllDevices()) - assert.Equal(t, "bar", node.Config["foo"]) - assert.Equal(t, []Device{{Name: "sda"}}, node.Devices) -} - -func TestResolveNodeSpecificProperties(t *testing.T) { - // a node with its own specific properties defined should keep those values, regardless of what the global cluster config is - storageSpec := StorageScopeSpec{ - Selection: Selection{ - DeviceFilter: "^sd.", - DevicePathFilter: "^/dev/disk/by-path/pci-.*", - }, - Config: map[string]string{ - "foo": "bar", - "baz": "biz", - }, - Nodes: []Node{ - { - Name: "node1", // node has its own config that should override cluster level config - Selection: Selection{ - DeviceFilter: "nvme.*", - DevicePathFilter: "^/dev/disk/by-id/.*foo.*", - Devices: []Device{{Name: "device026"}}, - }, - Config: map[string]string{ - "foo": "node1bar", - }, - }, - }, - } - - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.False(t, node.Selection.GetUseAllDevices()) - assert.Equal(t, "nvme.*", node.Selection.DeviceFilter) - assert.Equal(t, "^/dev/disk/by-id/.*foo.*", 
node.Selection.DevicePathFilter) - assert.Equal(t, []Device{{Name: "device026"}}, node.Devices) - assert.Equal(t, "node1bar", node.Config["foo"]) - assert.Equal(t, "biz", node.Config["baz"]) -} - -func TestResolveNodeUseAllDevices(t *testing.T) { - storageSpec := StorageScopeSpec{ - Selection: Selection{UseAllDevices: newBool(true)}, // UseAllDevices is set to true on the storage spec - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - }, - } - - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.True(t, node.Selection.GetUseAllDevices()) -} - -func TestUseAllDevices(t *testing.T) { - storageSpec := StorageScopeSpec{} - assert.False(t, storageSpec.AnyUseAllDevices()) - - storageSpec = StorageScopeSpec{ - Selection: Selection{ - UseAllDevices: newBool(true)}, // UseAllDevices is set to true on the storage spec - } - assert.True(t, storageSpec.AnyUseAllDevices()) - - storageSpec = StorageScopeSpec{ - Selection: Selection{UseAllDevices: newBool(false)}, - Nodes: []Node{ - { - Name: "node1", - Selection: Selection{UseAllDevices: newBool(true)}, - }, - }, - } - assert.True(t, storageSpec.AnyUseAllDevices()) -} - -func TestClearUseAllDevices(t *testing.T) { - // create a storage spec with use all devices set to true for the cluster and for all nodes - storageSpec := StorageScopeSpec{ - Selection: Selection{UseAllDevices: newBool(true)}, - Nodes: []Node{ - { - Name: "node1", - Selection: Selection{UseAllDevices: newBool(true)}, - }, - }, - } - assert.True(t, storageSpec.AnyUseAllDevices()) - - // now clear the use all devices field, it should be cleared from the entire cluster and its nodes - storageSpec.ClearUseAllDevices() - assert.False(t, storageSpec.AnyUseAllDevices()) -} - -func TestClusterDirsDevsInherit(t *testing.T) { - // test for no directories or devices given - storageSpec := StorageScopeSpec{ - Nodes: []Node{ - { - Name: "node1", - }, - }, - } - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, storageSpec.Devices, node.Devices) - - // test if cluster wide devices are inherited to no-directories/devices node - storageSpec = StorageScopeSpec{ - Selection: Selection{ - Devices: []Device{{Name: "device1"}}, - }, - Nodes: []Node{ - { - Name: "node1", - }, - }, - } - node = storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, []Device{{Name: "device1"}}, node.Devices) - - // test if node directories and devices are used - storageSpec = StorageScopeSpec{ - Nodes: []Node{ - { - Name: "node1", - Selection: Selection{ - Devices: []Device{{Name: "device2"}}, - }, - }, - }, - } - node = storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, []Device{{Name: "device2"}}, node.Devices) - - // test if cluster wide devices are and aren't inherited to nodes with and without directories/devices - storageSpec = StorageScopeSpec{ - Selection: Selection{ - Devices: []Device{{Name: "device4"}}, - }, - Nodes: []Node{ - { - Name: "node1", - Selection: Selection{ - Devices: []Device{{Name: "device3"}}, - }, - }, - { - Name: "node2", - }, - }, - } - // node1 keeps its specified devices - node = storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, []Device{{Name: "device3"}}, node.Devices) - - // node2 inherits the cluster wide devices since it specified none of its own - node = storageSpec.ResolveNode("node2") - assert.NotNil(t, node) - assert.Equal(t, []Device{{Name: "device4"}}, node.Devices) -} - -func TestStorageScopeSpec_NodeWithNameExists(t *testing.T) { - 
spec := &StorageScopeSpec{ - Nodes: []Node{}, - } - - assert.False(t, spec.NodeWithNameExists("node0")) - - spec.Nodes = []Node{ - {Name: "node0-hostname"}, - {Name: "node1"}, - {Name: "node2"}} - assert.True(t, spec.NodeWithNameExists("node0-hostname")) - assert.False(t, spec.NodeWithNameExists("node0")) - assert.True(t, spec.NodeWithNameExists("node1")) - assert.True(t, spec.NodeWithNameExists("node2")) -} - -func TestIsOnPVCEncrypted(t *testing.T) { - s := &StorageScopeSpec{} - assert.False(t, s.IsOnPVCEncrypted()) - - s.StorageClassDeviceSets = []StorageClassDeviceSet{ - {Encrypted: true}, - } - assert.True(t, s.IsOnPVCEncrypted()) -} diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go deleted file mode 100755 index ea35e8ef0..000000000 --- a/pkg/apis/ceph.rook.io/v1/types.go +++ /dev/null @@ -1,2045 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "time" - - rook "github.com/rook/rook/pkg/apis/rook.io" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// *************************************************************************** -// IMPORTANT FOR CODE GENERATION -// If the types in this file are updated, you will need to run -// `make codegen` to generate the new types under the client/clientset folder. 
-// *************************************************************************** - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephCluster is a Ceph storage cluster -// +kubebuilder:printcolumn:name="DataDirHostPath",type=string,JSONPath=`.spec.dataDirHostPath`,description="Directory used on the K8s nodes" -// +kubebuilder:printcolumn:name="MonCount",type=string,JSONPath=`.spec.mon.count`,description="Number of MONs" -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` -// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`,description="Phase" -// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message`,description="Message" -// +kubebuilder:printcolumn:name="Health",type=string,JSONPath=`.status.ceph.health`,description="Ceph Health" -// +kubebuilder:printcolumn:name="External",type=boolean,JSONPath=`.spec.external.enable` -// +kubebuilder:subresource:status -type CephCluster struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ClusterSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - // +nullable - Status ClusterStatus `json:"status,omitempty"` -} - -// CephClusterHealthCheckSpec represent the healthcheck for Ceph daemons -type CephClusterHealthCheckSpec struct { - // DaemonHealth is the health check for a given daemon - // +optional - // +nullable - DaemonHealth DaemonHealthSpec `json:"daemonHealth,omitempty"` - // LivenessProbe allows to change the livenessprobe configuration for a given daemon - // +optional - LivenessProbe map[rook.KeyType]*ProbeSpec `json:"livenessProbe,omitempty"` -} - -// DaemonHealthSpec is a daemon health check -type DaemonHealthSpec struct { - // Status represents the health check settings for the Ceph health - // +optional - // +nullable - Status HealthCheckSpec `json:"status,omitempty"` - // Monitor represents the health check settings for the Ceph monitor - // +optional - // +nullable - Monitor HealthCheckSpec `json:"mon,omitempty"` - // ObjectStorageDaemon represents the health check settings for the Ceph OSDs - // +optional - // +nullable - ObjectStorageDaemon HealthCheckSpec `json:"osd,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephClusterList is a list of CephCluster -type CephClusterList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephCluster `json:"items"` -} - -// ClusterSpec represents the specification of Ceph Cluster -type ClusterSpec struct { - // The version information that instructs Rook to orchestrate a particular version of Ceph. - // +optional - // +nullable - CephVersion CephVersionSpec `json:"cephVersion,omitempty"` - - // A spec for available storage in the cluster and how it should be used - // +optional - // +nullable - Storage StorageScopeSpec `json:"storage,omitempty"` - - // The annotations-related configuration to add/set on each Pod related object. - // +nullable - // +optional - Annotations AnnotationsSpec `json:"annotations,omitempty"` - - // The labels-related configuration to add/set on each Pod related object. - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Labels LabelsSpec `json:"labels,omitempty"` - - // The placement-related configuration to pass to kubernetes (affinity, node selector, tolerations). 
- // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Placement PlacementSpec `json:"placement,omitempty"` - - // Network related configuration - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Network NetworkSpec `json:"network,omitempty"` - - // Resources set resource requests and limits - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Resources ResourceSpec `json:"resources,omitempty"` - - // PriorityClassNames sets priority classes on components - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - PriorityClassNames PriorityClassNamesSpec `json:"priorityClassNames,omitempty"` - - // The path on the host where config and data can be persisted - // +kubebuilder:validation:Pattern=`^/(\S+)` - // +optional - DataDirHostPath string `json:"dataDirHostPath,omitempty"` - - // SkipUpgradeChecks defines if an upgrade should be forced even if one of the check fails - // +optional - SkipUpgradeChecks bool `json:"skipUpgradeChecks,omitempty"` - - // ContinueUpgradeAfterChecksEvenIfNotHealthy defines if an upgrade should continue even if PGs are not clean - // +optional - ContinueUpgradeAfterChecksEvenIfNotHealthy bool `json:"continueUpgradeAfterChecksEvenIfNotHealthy,omitempty"` - - // WaitTimeoutForHealthyOSDInMinutes defines the time the operator would wait before an OSD can be stopped for upgrade or restart. - // If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one - // if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would - // continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. - // The default wait timeout is 10 minutes. - // +optional - WaitTimeoutForHealthyOSDInMinutes time.Duration `json:"waitTimeoutForHealthyOSDInMinutes,omitempty"` - - // A spec for configuring disruption management. - // +nullable - // +optional - DisruptionManagement DisruptionManagementSpec `json:"disruptionManagement,omitempty"` - - // A spec for mon related options - // +optional - // +nullable - Mon MonSpec `json:"mon,omitempty"` - - // A spec for the crash controller - // +optional - // +nullable - CrashCollector CrashCollectorSpec `json:"crashCollector,omitempty"` - - // Dashboard settings - // +optional - // +nullable - Dashboard DashboardSpec `json:"dashboard,omitempty"` - - // Prometheus based Monitoring settings - // +optional - // +nullable - Monitoring MonitoringSpec `json:"monitoring,omitempty"` - - // Whether the Ceph Cluster is running external to this Kubernetes cluster - // mon, mgr, osd, mds, and discover daemons will not be created for external clusters. - // +optional - // +nullable - External ExternalSpec `json:"external,omitempty"` - - // A spec for mgr related options - // +optional - // +nullable - Mgr MgrSpec `json:"mgr,omitempty"` - - // Remove the OSD that is out and safe to remove only if this option is true - // +optional - RemoveOSDsIfOutAndSafeToRemove bool `json:"removeOSDsIfOutAndSafeToRemove,omitempty"` - - // Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster - // deletion is not imminent. 
- // +optional - // +nullable - CleanupPolicy CleanupPolicySpec `json:"cleanupPolicy,omitempty"` - - // Internal daemon healthchecks and liveness probe - // +optional - // +nullable - HealthCheck CephClusterHealthCheckSpec `json:"healthCheck,omitempty"` - - // Security represents security settings - // +optional - // +nullable - Security SecuritySpec `json:"security,omitempty"` - - // Logging represents loggings settings - // +optional - // +nullable - LogCollector LogCollectorSpec `json:"logCollector,omitempty"` -} - -// LogCollectorSpec is the logging spec -type LogCollectorSpec struct { - // Enabled represents whether the log collector is enabled - // +optional - Enabled bool `json:"enabled,omitempty"` - // Periodicity is the periodicity of the log rotation - // +optional - Periodicity string `json:"periodicity,omitempty"` -} - -// SecuritySpec is security spec to include various security items such as kms -type SecuritySpec struct { - // KeyManagementService is the main Key Management option - // +optional - // +nullable - KeyManagementService KeyManagementServiceSpec `json:"kms,omitempty"` -} - -// KeyManagementServiceSpec represent various details of the KMS server -type KeyManagementServiceSpec struct { - // ConnectionDetails contains the KMS connection details (address, port etc) - // +optional - // +nullable - // +kubebuilder:pruning:PreserveUnknownFields - ConnectionDetails map[string]string `json:"connectionDetails,omitempty"` - // TokenSecretName is the kubernetes secret containing the KMS token - // +optional - TokenSecretName string `json:"tokenSecretName,omitempty"` -} - -// CephVersionSpec represents the settings for the Ceph version that Rook is orchestrating. -type CephVersionSpec struct { - // Image is the container image used to launch the ceph daemons, such as quay.io/ceph/ceph: - // The full list of images can be found at https://quay.io/repository/ceph/ceph?tab=tags - // +optional - Image string `json:"image,omitempty"` - - // Whether to allow unsupported versions (do not set to true in production) - // +optional - AllowUnsupported bool `json:"allowUnsupported,omitempty"` -} - -// DashboardSpec represents the settings for the Ceph dashboard -type DashboardSpec struct { - // Enabled determines whether to enable the dashboard - // +optional - Enabled bool `json:"enabled,omitempty"` - // URLPrefix is a prefix for all URLs to use the dashboard with a reverse proxy - // +optional - URLPrefix string `json:"urlPrefix,omitempty"` - // Port is the dashboard webserver port - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=65535 - // +optional - Port int `json:"port,omitempty"` - // SSL determines whether SSL should be used - // +optional - SSL bool `json:"ssl,omitempty"` -} - -// MonitoringSpec represents the settings for Prometheus based Ceph monitoring -type MonitoringSpec struct { - // Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus - // types must exist or the creation will fail. - // +optional - Enabled bool `json:"enabled,omitempty"` - - // RulesNamespace is the namespace where the prometheus rules and alerts should be created. - // If empty, the same namespace as the cluster will be used. 
- // +optional - RulesNamespace string `json:"rulesNamespace,omitempty"` - - // ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint - // +optional - // +nullable - ExternalMgrEndpoints []v1.EndpointAddress `json:"externalMgrEndpoints,omitempty"` - - // ExternalMgrPrometheusPort Prometheus exporter port - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=65535 - // +optional - ExternalMgrPrometheusPort uint16 `json:"externalMgrPrometheusPort,omitempty"` -} - -// ClusterStatus represents the status of a Ceph cluster -type ClusterStatus struct { - State ClusterState `json:"state,omitempty"` - Phase ConditionType `json:"phase,omitempty"` - Message string `json:"message,omitempty"` - Conditions []Condition `json:"conditions,omitempty"` - CephStatus *CephStatus `json:"ceph,omitempty"` - CephStorage *CephStorage `json:"storage,omitempty"` - CephVersion *ClusterVersion `json:"version,omitempty"` -} - -// CephDaemonsVersions show the current ceph version for different ceph daemons -type CephDaemonsVersions struct { - // Mon shows Mon Ceph version - // +optional - Mon map[string]int `json:"mon,omitempty"` - // Mgr shows Mgr Ceph version - // +optional - Mgr map[string]int `json:"mgr,omitempty"` - // Osd shows Osd Ceph version - // +optional - Osd map[string]int `json:"osd,omitempty"` - // Rgw shows Rgw Ceph version - // +optional - Rgw map[string]int `json:"rgw,omitempty"` - // Mds shows Mds Ceph version - // +optional - Mds map[string]int `json:"mds,omitempty"` - // RbdMirror shows RbdMirror Ceph version - // +optional - RbdMirror map[string]int `json:"rbd-mirror,omitempty"` - // CephFSMirror shows CephFSMirror Ceph version - // +optional - CephFSMirror map[string]int `json:"cephfs-mirror,omitempty"` - // Overall shows overall Ceph version - // +optional - Overall map[string]int `json:"overall,omitempty"` -} - -// CephStatus is the details health of a Ceph Cluster -type CephStatus struct { - Health string `json:"health,omitempty"` - Details map[string]CephHealthMessage `json:"details,omitempty"` - LastChecked string `json:"lastChecked,omitempty"` - LastChanged string `json:"lastChanged,omitempty"` - PreviousHealth string `json:"previousHealth,omitempty"` - Capacity Capacity `json:"capacity,omitempty"` - // +optional - Versions *CephDaemonsVersions `json:"versions,omitempty"` -} - -// Capacity is the capacity information of a Ceph Cluster -type Capacity struct { - TotalBytes uint64 `json:"bytesTotal,omitempty"` - UsedBytes uint64 `json:"bytesUsed,omitempty"` - AvailableBytes uint64 `json:"bytesAvailable,omitempty"` - LastUpdated string `json:"lastUpdated,omitempty"` -} - -// CephStorage represents flavors of Ceph Cluster Storage -type CephStorage struct { - DeviceClasses []DeviceClasses `json:"deviceClasses,omitempty"` -} - -// DeviceClasses represents device classes of a Ceph Cluster -type DeviceClasses struct { - Name string `json:"name,omitempty"` -} - -// ClusterVersion represents the version of a Ceph Cluster -type ClusterVersion struct { - Image string `json:"image,omitempty"` - Version string `json:"version,omitempty"` -} - -// CephHealthMessage represents the health message of a Ceph Cluster -type CephHealthMessage struct { - Severity string `json:"severity"` - Message string `json:"message"` -} - -// Condition represents a status condition on any Rook-Ceph Custom Resource. 
-type Condition struct { - Type ConditionType `json:"type,omitempty"` - Status v1.ConditionStatus `json:"status,omitempty"` - Reason ConditionReason `json:"reason,omitempty"` - Message string `json:"message,omitempty"` - LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"` - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} - -// ConditionReason is a reason for a condition -type ConditionReason string - -const ( - // ClusterCreatedReason is cluster created reason - ClusterCreatedReason ConditionReason = "ClusterCreated" - // ClusterConnectedReason is cluster connected reason - ClusterConnectedReason ConditionReason = "ClusterConnected" - // ClusterProgressingReason is cluster progressing reason - ClusterProgressingReason ConditionReason = "ClusterProgressing" - // ClusterDeletingReason is cluster deleting reason - ClusterDeletingReason ConditionReason = "ClusterDeleting" - // ClusterConnectingReason is cluster connecting reason - ClusterConnectingReason ConditionReason = "ClusterConnecting" - - // ReconcileSucceeded represents when a resource reconciliation was successful. - ReconcileSucceeded ConditionReason = "ReconcileSucceeded" - // ReconcileFailed represents when a resource reconciliation failed. - ReconcileFailed ConditionReason = "ReconcileFailed" - - // DeletingReason represents when Rook has detected a resource object should be deleted. - DeletingReason ConditionReason = "Deleting" - // ObjectHasDependentsReason represents when a resource object has dependents that are blocking - // deletion. - ObjectHasDependentsReason ConditionReason = "ObjectHasDependents" - // ObjectHasNoDependentsReason represents when a resource object has no dependents that are - // blocking deletion. - ObjectHasNoDependentsReason ConditionReason = "ObjectHasNoDependents" -) - -// ConditionType represent a resource's status -type ConditionType string - -const ( - // ConditionConnecting represents Connecting state of an object - ConditionConnecting ConditionType = "Connecting" - // ConditionConnected represents Connected state of an object - ConditionConnected ConditionType = "Connected" - // ConditionProgressing represents Progressing state of an object - ConditionProgressing ConditionType = "Progressing" - // ConditionReady represents Ready state of an object - ConditionReady ConditionType = "Ready" - // ConditionFailure represents Failure state of an object - ConditionFailure ConditionType = "Failure" - // ConditionDeleting represents Deleting state of an object - ConditionDeleting ConditionType = "Deleting" - - // ConditionDeletionIsBlocked represents when deletion of the object is blocked. 
- ConditionDeletionIsBlocked ConditionType = "DeletionIsBlocked" -) - -// ClusterState represents the state of a Ceph Cluster -type ClusterState string - -const ( - // ClusterStateCreating represents the Creating state of a Ceph Cluster - ClusterStateCreating ClusterState = "Creating" - // ClusterStateCreated represents the Created state of a Ceph Cluster - ClusterStateCreated ClusterState = "Created" - // ClusterStateUpdating represents the Updating state of a Ceph Cluster - ClusterStateUpdating ClusterState = "Updating" - // ClusterStateConnecting represents the Connecting state of a Ceph Cluster - ClusterStateConnecting ClusterState = "Connecting" - // ClusterStateConnected represents the Connected state of a Ceph Cluster - ClusterStateConnected ClusterState = "Connected" - // ClusterStateError represents the Error state of a Ceph Cluster - ClusterStateError ClusterState = "Error" -) - -// MonSpec represents the specification of the monitor -type MonSpec struct { - // Count is the number of Ceph monitors - // +kubebuilder:validation:Minimum=0 - // +optional - Count int `json:"count,omitempty"` - // AllowMultiplePerNode determines if we can run multiple monitors on the same node (not recommended) - // +optional - AllowMultiplePerNode bool `json:"allowMultiplePerNode,omitempty"` - // StretchCluster is the stretch cluster specification - // +optional - StretchCluster *StretchClusterSpec `json:"stretchCluster,omitempty"` - // VolumeClaimTemplate is the PVC definition - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` -} - -// StretchClusterSpec represents the specification of a stretched Ceph Cluster -type StretchClusterSpec struct { - // FailureDomainLabel the failure domain name (e,g: zone) - // +optional - FailureDomainLabel string `json:"failureDomainLabel,omitempty"` - // SubFailureDomain is the failure domain within a zone - // +optional - SubFailureDomain string `json:"subFailureDomain,omitempty"` - // Zones is the list of zones - // +optional - // +nullable - Zones []StretchClusterZoneSpec `json:"zones,omitempty"` -} - -// StretchClusterZoneSpec represents the specification of a stretched zone in a Ceph Cluster -type StretchClusterZoneSpec struct { - // Name is the name of the zone - // +optional - Name string `json:"name,omitempty"` - // Arbiter determines if the zone contains the arbiter - // +optional - Arbiter bool `json:"arbiter,omitempty"` - // VolumeClaimTemplate is the PVC template - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` -} - -// MgrSpec represents options to configure a ceph mgr -type MgrSpec struct { - // Count is the number of manager to run - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=2 - // +optional - Count int `json:"count,omitempty"` - // AllowMultiplePerNode allows to run multiple managers on the same node (not recommended) - // +optional - AllowMultiplePerNode bool `json:"allowMultiplePerNode,omitempty"` - // Modules is the list of ceph manager modules to enable/disable - // +optional - // +nullable - Modules []Module `json:"modules,omitempty"` -} - -// Module represents mgr modules that the user wants to enable or disable -type Module struct { - // Name is the name of the ceph manager module - // +optional - Name string `json:"name,omitempty"` - // Enabled determines whether a module should be enabled or not - // +optional - 
Enabled bool `json:"enabled,omitempty"` -} - -// ExternalSpec represents the options supported by an external cluster -// +kubebuilder:pruning:PreserveUnknownFields -// +nullable -type ExternalSpec struct { - // Enable determines whether external mode is enabled or not - // +optional - Enable bool `json:"enable,omitempty"` -} - -// CrashCollectorSpec represents options to configure the crash controller -type CrashCollectorSpec struct { - // Disable determines whether we should enable the crash collector - // +optional - Disable bool `json:"disable,omitempty"` - - // DaysToRetain represents the number of days to retain crash until they get pruned - // +optional - DaysToRetain uint `json:"daysToRetain,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephBlockPool represents a Ceph Storage Pool -// +kubebuilder:subresource:status -type CephBlockPool struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec PoolSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - Status *CephBlockPoolStatus `json:"status,omitempty"` -} - -// CephBlockPoolList is a list of Ceph Storage Pools -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type CephBlockPoolList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephBlockPool `json:"items"` -} - -const ( - // DefaultFailureDomain for PoolSpec - DefaultFailureDomain = "host" - // DefaultCRUSHRoot is the default name of the CRUSH root bucket - DefaultCRUSHRoot = "default" -) - -// PoolSpec represents the spec of ceph pool -type PoolSpec struct { - // The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map - // +optional - FailureDomain string `json:"failureDomain,omitempty"` - - // The root of the crush hierarchy utilized by the pool - // +optional - // +nullable - CrushRoot string `json:"crushRoot,omitempty"` - - // The device class the OSD should set to for use in the pool - // +optional - // +nullable - DeviceClass string `json:"deviceClass,omitempty"` - - // The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) - // +kubebuilder:validation:Enum=none;passive;aggressive;force;"" - // +kubebuilder:default=none - // +optional - // +nullable - CompressionMode string `json:"compressionMode,omitempty"` - - // The replication settings - // +optional - Replicated ReplicatedSpec `json:"replicated,omitempty"` - - // The erasure code settings - // +optional - ErasureCoded ErasureCodedSpec `json:"erasureCoded,omitempty"` - - // Parameters is a list of properties to enable on a given pool - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - // +nullable - Parameters map[string]string `json:"parameters,omitempty"` - - // EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool - EnableRBDStats bool `json:"enableRBDStats,omitempty"` - - // The mirroring settings - Mirroring MirroringSpec `json:"mirroring,omitempty"` - - // The mirroring statusCheck - // +kubebuilder:pruning:PreserveUnknownFields - StatusCheck MirrorHealthCheckSpec `json:"statusCheck,omitempty"` - - // The quota settings - // +optional - // +nullable - Quotas QuotaSpec `json:"quotas,omitempty"` -} - -// MirrorHealthCheckSpec represents the health specification of a Ceph Storage Pool mirror -type MirrorHealthCheckSpec struct { - // +optional - // +nullable - Mirror HealthCheckSpec 
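The CephBlockPool and PoolSpec definitions above translate into Go roughly as follows; a minimal sketch assuming the cephv1 and metav1 import aliases, with the pool name and namespace purely illustrative.

// Illustrative: a size-3 replicated pool spread across hosts.
pool := cephv1.CephBlockPool{
	ObjectMeta: metav1.ObjectMeta{Name: "replicapool", Namespace: "rook-ceph"}, // illustrative
	Spec: cephv1.PoolSpec{
		FailureDomain:   cephv1.DefaultFailureDomain, // "host"
		CompressionMode: "none",
		Replicated: cephv1.ReplicatedSpec{
			Size:                   3,
			RequireSafeReplicaSize: true,
		},
	},
}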
`json:"mirror,omitempty"` -} - -// CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool -type CephBlockPoolStatus struct { - // +optional - Phase ConditionType `json:"phase,omitempty"` - // +optional - MirroringStatus *MirroringStatusSpec `json:"mirroringStatus,omitempty"` - // +optional - MirroringInfo *MirroringInfoSpec `json:"mirroringInfo,omitempty"` - // +optional - SnapshotScheduleStatus *SnapshotScheduleStatusSpec `json:"snapshotScheduleStatus,omitempty"` - // +optional - // +nullable - Info map[string]string `json:"info,omitempty"` -} - -// MirroringStatusSpec is the status of the pool mirroring -type MirroringStatusSpec struct { - // PoolMirroringStatus is the mirroring status of a pool - // +optional - PoolMirroringStatus `json:",inline"` - // LastChecked is the last time time the status was checked - // +optional - LastChecked string `json:"lastChecked,omitempty"` - // LastChanged is the last time time the status last changed - // +optional - LastChanged string `json:"lastChanged,omitempty"` - // Details contains potential status errors - // +optional - Details string `json:"details,omitempty"` -} - -// PoolMirroringStatus is the pool mirror status -type PoolMirroringStatus struct { - // Summary is the mirroring status summary - // +optional - Summary *PoolMirroringStatusSummarySpec `json:"summary,omitempty"` -} - -// PoolMirroringStatusSummarySpec is the summary output of the command -type PoolMirroringStatusSummarySpec struct { - // Health is the mirroring health - // +optional - Health string `json:"health,omitempty"` - // DaemonHealth is the health of the mirroring daemon - // +optional - DaemonHealth string `json:"daemon_health,omitempty"` - // ImageHealth is the health of the mirrored image - // +optional - ImageHealth string `json:"image_health,omitempty"` - // States is the various state for all mirrored images - // +optional - // +nullable - States StatesSpec `json:"states,omitempty"` -} - -// StatesSpec are rbd images mirroring state -type StatesSpec struct { - // StartingReplay is when the replay of the mirroring journal starts - // +optional - StartingReplay int `json:"starting_replay,omitempty"` - // Replaying is when the replay of the mirroring journal is on-going - // +optional - Replaying int `json:"replaying,omitempty"` - // Syncing is when the image is syncing - // +optional - Syncing int `json:"syncing,omitempty"` - // StopReplaying is when the replay of the mirroring journal stops - // +optional - StopReplaying int `json:"stopping_replay,omitempty"` - // Stopped is when the mirroring state is stopped - // +optional - Stopped int `json:"stopped,omitempty"` - // Unknown is when the mirroring state is unknown - // +optional - Unknown int `json:"unknown,omitempty"` - // Error is when the mirroring state is errored - // +optional - Error int `json:"error,omitempty"` -} - -// MirroringInfoSpec is the status of the pool mirroring -type MirroringInfoSpec struct { - // +optional - *PoolMirroringInfo `json:",inline"` - // +optional - LastChecked string `json:"lastChecked,omitempty"` - // +optional - LastChanged string `json:"lastChanged,omitempty"` - // +optional - Details string `json:"details,omitempty"` -} - -// PoolMirroringInfo is the mirroring info of a given pool -type PoolMirroringInfo struct { - // Mode is the mirroring mode - // +optional - Mode string `json:"mode,omitempty"` - // SiteName is the current site name - // +optional - SiteName string `json:"site_name,omitempty"` - // Peers are the list of peer sites connected to that cluster - 
// +optional - Peers []PeersSpec `json:"peers,omitempty"` -} - -// PeersSpec contains peer details -type PeersSpec struct { - // UUID is the peer UUID - // +optional - UUID string `json:"uuid,omitempty"` - // Direction is the peer mirroring direction - // +optional - Direction string `json:"direction,omitempty"` - // SiteName is the current site name - // +optional - SiteName string `json:"site_name,omitempty"` - // MirrorUUID is the mirror UUID - // +optional - MirrorUUID string `json:"mirror_uuid,omitempty"` - // ClientName is the CephX user used to connect to the peer - // +optional - ClientName string `json:"client_name,omitempty"` -} - -// SnapshotScheduleStatusSpec is the status of the snapshot schedule -type SnapshotScheduleStatusSpec struct { - // SnapshotSchedules is the list of snapshots scheduled - // +nullable - // +optional - SnapshotSchedules []SnapshotSchedulesSpec `json:"snapshotSchedules,omitempty"` - // LastChecked is the last time time the status was checked - // +optional - LastChecked string `json:"lastChecked,omitempty"` - // LastChanged is the last time time the status last changed - // +optional - LastChanged string `json:"lastChanged,omitempty"` - // Details contains potential status errors - // +optional - Details string `json:"details,omitempty"` -} - -// SnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool -type SnapshotSchedulesSpec struct { - // Pool is the pool name - // +optional - Pool string `json:"pool,omitempty"` - // Namespace is the RADOS namespace the image is part of - // +optional - Namespace string `json:"namespace,omitempty"` - // Image is the mirrored image - // +optional - Image string `json:"image,omitempty"` - // Items is the list schedules times for a given snapshot - // +optional - Items []SnapshotSchedule `json:"items,omitempty"` -} - -// SnapshotSchedule is a schedule -type SnapshotSchedule struct { - // Interval is the interval in which snapshots will be taken - // +optional - Interval string `json:"interval,omitempty"` - // StartTime is the snapshot starting time - // +optional - StartTime string `json:"start_time,omitempty"` -} - -// Status represents the status of an object -type Status struct { - // +optional - Phase string `json:"phase,omitempty"` -} - -// ReplicatedSpec represents the spec for replication in a pool -type ReplicatedSpec struct { - // Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) - // +kubebuilder:validation:Minimum=0 - Size uint `json:"size"` - - // TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity - // +optional - TargetSizeRatio float64 `json:"targetSizeRatio,omitempty"` - - // RequireSafeReplicaSize if false allows you to set replica 1 - // +optional - RequireSafeReplicaSize bool `json:"requireSafeReplicaSize,omitempty"` - - // ReplicasPerFailureDomain the number of replica in the specified failure domain - // +kubebuilder:validation:Minimum=1 - // +optional - ReplicasPerFailureDomain uint `json:"replicasPerFailureDomain,omitempty"` - - // SubFailureDomain the name of the sub-failure domain - // +optional - SubFailureDomain string `json:"subFailureDomain,omitempty"` - - // HybridStorage represents hybrid storage tier settings - // +optional - // +nullable - HybridStorage *HybridStorageSpec `json:"hybridStorage,omitempty"` -} - -// HybridStorageSpec represents the settings for hybrid storage pool -type HybridStorageSpec struct { - // PrimaryDeviceClass 
represents high performance tier (for example SSD or NVME) for Primary OSD - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required - // +required - PrimaryDeviceClass string `json:"primaryDeviceClass"` - // SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required - // +required - SecondaryDeviceClass string `json:"secondaryDeviceClass"` -} - -// MirroringSpec represents the setting for a mirrored pool -type MirroringSpec struct { - // Enabled whether this pool is mirrored or not - // +optional - Enabled bool `json:"enabled,omitempty"` - - // Mode is the mirroring mode: either pool or image - // +optional - Mode string `json:"mode,omitempty"` - - // SnapshotSchedules is the scheduling of snapshot for mirrored images/pools - // +optional - SnapshotSchedules []SnapshotScheduleSpec `json:"snapshotSchedules,omitempty"` - - // Peers represents the peers spec - // +nullable - // +optional - Peers *MirroringPeerSpec `json:"peers,omitempty"` -} - -// SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool -type SnapshotScheduleSpec struct { - // Path is the path to snapshot, only valid for CephFS - // +optional - Path string `json:"path,omitempty"` - - // Interval represent the periodicity of the snapshot. - // +optional - Interval string `json:"interval,omitempty"` - - // StartTime indicates when to start the snapshot - // +optional - StartTime string `json:"startTime,omitempty"` -} - -// QuotaSpec represents the spec for quotas in a pool -type QuotaSpec struct { - // MaxBytes represents the quota in bytes - // Deprecated in favor of MaxSize - // +optional - MaxBytes *uint64 `json:"maxBytes,omitempty"` - - // MaxSize represents the quota in bytes as a string - // +kubebuilder:validation:Pattern=`^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$` - // +optional - MaxSize *string `json:"maxSize,omitempty"` - - // MaxObjects represents the quota in objects - // +optional - MaxObjects *uint64 `json:"maxObjects,omitempty"` -} - -// ErasureCodedSpec represents the spec for erasure code in a pool -type ErasureCodedSpec struct { - // Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=9 - CodingChunks uint `json:"codingChunks"` - - // Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=9 - DataChunks uint `json:"dataChunks"` - - // The algorithm for erasure coding - // +optional - Algorithm string `json:"algorithm,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephFilesystem represents a Ceph Filesystem -// +kubebuilder:printcolumn:name="ActiveMDS",type=string,JSONPath=`.spec.metadataServer.activeCount`,description="Number of desired active MDS daemons" -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` -// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:subresource:status -type CephFilesystem struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec FilesystemSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - Status *CephFilesystemStatus `json:"status,omitempty"` -} - -// 
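For the erasure-coding and quota fields above, a small worked sketch; the chunk counts and quota size are illustrative, and the overhead arithmetic is the standard k+m calculation rather than something stated in the diff.

// With k=2 data chunks and m=1 coding chunk the raw-space overhead is (k+m)/k = 1.5x,
// versus 3x for size-3 replication, and the pool survives the loss of one failure domain.
maxSize := "100Gi" // illustrative quota
ecPool := cephv1.PoolSpec{
	FailureDomain: cephv1.DefaultFailureDomain,
	ErasureCoded: cephv1.ErasureCodedSpec{
		DataChunks:   2,
		CodingChunks: 1,
	},
	Quotas: cephv1.QuotaSpec{MaxSize: &maxSize},
}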
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephFilesystemList represents a list of Ceph Filesystems -type CephFilesystemList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephFilesystem `json:"items"` -} - -// FilesystemSpec represents the spec of a file system -type FilesystemSpec struct { - // The metadata pool settings - // +nullable - MetadataPool PoolSpec `json:"metadataPool"` - - // The data pool settings - // +nullable - DataPools []PoolSpec `json:"dataPools"` - - // Preserve pools on filesystem deletion - // +optional - PreservePoolsOnDelete bool `json:"preservePoolsOnDelete,omitempty"` - - // Preserve the fs in the cluster on CephFilesystem CR deletion. Setting this to true automatically implies PreservePoolsOnDelete is true. - // +optional - PreserveFilesystemOnDelete bool `json:"preserveFilesystemOnDelete,omitempty"` - - // The mds pod info - MetadataServer MetadataServerSpec `json:"metadataServer"` - - // The mirroring settings - // +nullable - // +optional - Mirroring *FSMirroringSpec `json:"mirroring,omitempty"` - - // The mirroring statusCheck - // +kubebuilder:pruning:PreserveUnknownFields - StatusCheck MirrorHealthCheckSpec `json:"statusCheck,omitempty"` -} - -// MetadataServerSpec represents the specification of a Ceph Metadata Server -type MetadataServerSpec struct { - // The number of metadata servers that are active. The remaining servers in the cluster will be in standby mode. - // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Maximum=10 - ActiveCount int32 `json:"activeCount"` - - // Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. - // If false, standbys will still be available, but will not have a warm metadata cache. - // +optional - ActiveStandby bool `json:"activeStandby,omitempty"` - - // The affinity to place the mds pods (default is to place on all available node) with a daemonset - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Placement Placement `json:"placement,omitempty"` - - // The annotations-related configuration to add/set on each Pod related object. - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Annotations rook.Annotations `json:"annotations,omitempty"` - - // The labels-related configuration to add/set on each Pod related object. - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Labels rook.Labels `json:"labels,omitempty"` - - // The resource requirements for the rgw pods - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Resources v1.ResourceRequirements `json:"resources,omitempty"` - - // PriorityClassName sets priority classes on components - // +optional - PriorityClassName string `json:"priorityClassName,omitempty"` -} - -// FSMirroringSpec represents the setting for a mirrored filesystem -type FSMirroringSpec struct { - // Enabled whether this filesystem is mirrored or not - // +optional - Enabled bool `json:"enabled,omitempty"` - - // Peers represents the peers spec - // +nullable - // +optional - Peers *MirroringPeerSpec `json:"peers,omitempty"` - - // SnapshotSchedules is the scheduling of snapshot for mirrored filesystems - // +optional - SnapshotSchedules []SnapshotScheduleSpec `json:"snapshotSchedules,omitempty"` - - // Retention is the retention policy for a snapshot schedule - // One path has exactly one retention policy. 
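The FilesystemSpec and MetadataServerSpec fields above fit together roughly like this minimal sketch (import aliases assumed, all values illustrative):

fs := cephv1.FilesystemSpec{
	MetadataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 3}},
	DataPools:    []cephv1.PoolSpec{{Replicated: cephv1.ReplicatedSpec{Size: 3}}},
	MetadataServer: cephv1.MetadataServerSpec{
		ActiveCount:   1,    // one active MDS
		ActiveStandby: true, // plus a standby with a warm metadata cache
	},
	PreserveFilesystemOnDelete: true,
}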
- // A policy can however contain multiple count-time period pairs in order to specify complex retention policies - // +optional - SnapshotRetention []SnapshotScheduleRetentionSpec `json:"snapshotRetention,omitempty"` -} - -// SnapshotScheduleRetentionSpec is a retention policy -type SnapshotScheduleRetentionSpec struct { - // Path is the path to snapshot - // +optional - Path string `json:"path,omitempty"` - - // Duration represents the retention duration for a snapshot - // +optional - Duration string `json:"duration,omitempty"` -} - -// CephFilesystemStatus represents the status of a Ceph Filesystem -type CephFilesystemStatus struct { - // +optional - Phase ConditionType `json:"phase,omitempty"` - // +optional - SnapshotScheduleStatus *FilesystemSnapshotScheduleStatusSpec `json:"snapshotScheduleStatus,omitempty"` - // Use only info and put mirroringStatus in it? - // +optional - // +nullable - Info map[string]string `json:"info,omitempty"` - // MirroringStatus is the filesystem mirroring status - // +optional - MirroringStatus *FilesystemMirroringInfoSpec `json:"mirroringStatus,omitempty"` -} - -// FilesystemMirroringInfo is the status of the pool mirroring -type FilesystemMirroringInfoSpec struct { - // PoolMirroringStatus is the mirroring status of a filesystem - // +nullable - // +optional - FilesystemMirroringAllInfo []FilesystemMirroringInfo `json:"daemonsStatus,omitempty"` - // LastChecked is the last time time the status was checked - // +optional - LastChecked string `json:"lastChecked,omitempty"` - // LastChanged is the last time time the status last changed - // +optional - LastChanged string `json:"lastChanged,omitempty"` - // Details contains potential status errors - // +optional - Details string `json:"details,omitempty"` -} - -// FilesystemSnapshotScheduleStatusSpec is the status of the snapshot schedule -type FilesystemSnapshotScheduleStatusSpec struct { - // SnapshotSchedules is the list of snapshots scheduled - // +nullable - // +optional - SnapshotSchedules []FilesystemSnapshotSchedulesSpec `json:"snapshotSchedules,omitempty"` - // LastChecked is the last time time the status was checked - // +optional - LastChecked string `json:"lastChecked,omitempty"` - // LastChanged is the last time time the status last changed - // +optional - LastChanged string `json:"lastChanged,omitempty"` - // Details contains potential status errors - // +optional - Details string `json:"details,omitempty"` -} - -// FilesystemSnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool -type FilesystemSnapshotSchedulesSpec struct { - // Fs is the name of the Ceph Filesystem - // +optional - Fs string `json:"fs,omitempty"` - // Subvol is the name of the sub volume - // +optional - Subvol string `json:"subvol,omitempty"` - // Path is the path on the filesystem - // +optional - Path string `json:"path,omitempty"` - // +optional - RelPath string `json:"rel_path,omitempty"` - // +optional - Schedule string `json:"schedule,omitempty"` - // +optional - Retention FilesystemSnapshotScheduleStatusRetention `json:"retention,omitempty"` -} - -// FilesystemSnapshotScheduleStatusRetention is the retention specification for a filesystem snapshot schedule -type FilesystemSnapshotScheduleStatusRetention struct { - // Start is when the snapshot schedule starts - // +optional - Start string `json:"start,omitempty"` - // Created is when the snapshot schedule was created - // +optional - Created string `json:"created,omitempty"` - // First is when the first snapshot schedule was taken - // 
+optional - First string `json:"first,omitempty"` - // Last is when the last snapshot schedule was taken - // +optional - Last string `json:"last,omitempty"` - // LastPruned is when the last snapshot schedule was pruned - // +optional - LastPruned string `json:"last_pruned,omitempty"` - // CreatedCount is total amount of snapshots - // +optional - CreatedCount int `json:"created_count,omitempty"` - // PrunedCount is total amount of pruned snapshots - // +optional - PrunedCount int `json:"pruned_count,omitempty"` - // Active is whether the scheduled is active or not - // +optional - Active bool `json:"active,omitempty"` -} - -// FilesystemMirrorInfoSpec is the filesystem mirror status of a given filesystem -type FilesystemMirroringInfo struct { - // DaemonID is the cephfs-mirror name - // +optional - DaemonID int `json:"daemon_id,omitempty"` - // Filesystems is the list of filesystems managed by a given cephfs-mirror daemon - // +optional - Filesystems []FilesystemsSpec `json:"filesystems,omitempty"` -} - -// FilesystemsSpec is spec for the mirrored filesystem -type FilesystemsSpec struct { - // FilesystemID is the filesystem identifier - // +optional - FilesystemID int `json:"filesystem_id,omitempty"` - // Name is name of the filesystem - // +optional - Name string `json:"name,omitempty"` - // DirectoryCount is the number of directories in the filesystem - // +optional - DirectoryCount int `json:"directory_count,omitempty"` - // Peers represents the mirroring peers - // +optional - Peers []FilesystemMirrorInfoPeerSpec `json:"peers,omitempty"` -} - -// FilesystemMirrorInfoPeerSpec is the specification of a filesystem peer mirror -type FilesystemMirrorInfoPeerSpec struct { - // UUID is the peer unique identifier - // +optional - UUID string `json:"uuid,omitempty"` - // Remote are the remote cluster information - // +optional - Remote *PeerRemoteSpec `json:"remote,omitempty"` - // Stats are the stat a peer mirror - // +optional - Stats *PeerStatSpec `json:"stats,omitempty"` -} - -type PeerRemoteSpec struct { - // ClientName is cephx name - // +optional - ClientName string `json:"client_name,omitempty"` - // ClusterName is the name of the cluster - // +optional - ClusterName string `json:"cluster_name,omitempty"` - // FsName is the filesystem name - // +optional - FsName string `json:"fs_name,omitempty"` -} - -// PeerStatSpec are the mirror stat with a given peer -type PeerStatSpec struct { - // FailureCount is the number of mirroring failure - // +optional - FailureCount int `json:"failure_count,omitempty"` - // RecoveryCount is the number of recovery attempted after failures - // +optional - RecoveryCount int `json:"recovery_count,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephObjectStore represents a Ceph Object Store Gateway -// +kubebuilder:subresource:status -type CephObjectStore struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ObjectStoreSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - Status *ObjectStoreStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephObjectStoreList represents a Ceph Object Store Gateways -type CephObjectStoreList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephObjectStore `json:"items"` -} - -// ObjectStoreSpec represent the spec of a pool -type ObjectStoreSpec struct { - // The metadata pool settings - 
// +optional - // +nullable - MetadataPool PoolSpec `json:"metadataPool,omitempty"` - - // The data pool settings - // +optional - // +nullable - DataPool PoolSpec `json:"dataPool,omitempty"` - - // Preserve pools on object store deletion - // +optional - PreservePoolsOnDelete bool `json:"preservePoolsOnDelete,omitempty"` - - // The rgw pod info - // +optional - // +nullable - Gateway GatewaySpec `json:"gateway"` - - // The multisite info - // +optional - // +nullable - Zone ZoneSpec `json:"zone,omitempty"` - - // The rgw Bucket healthchecks and liveness probe - // +optional - // +nullable - HealthCheck BucketHealthCheckSpec `json:"healthCheck,omitempty"` - - // Security represents security settings - // +optional - // +nullable - Security *SecuritySpec `json:"security,omitempty"` -} - -// BucketHealthCheckSpec represents the health check of an object store -type BucketHealthCheckSpec struct { - // +optional - Bucket HealthCheckSpec `json:"bucket,omitempty"` - // +optional - LivenessProbe *ProbeSpec `json:"livenessProbe,omitempty"` -} - -// HealthCheckSpec represents the health check of an object store bucket -type HealthCheckSpec struct { - // +optional - Disabled bool `json:"disabled,omitempty"` - // Interval is the internal in second or minute for the health check to run like 60s for 60 seconds - // +optional - Interval *metav1.Duration `json:"interval,omitempty"` - // +optional - Timeout string `json:"timeout,omitempty"` -} - -// GatewaySpec represents the specification of Ceph Object Store Gateway -type GatewaySpec struct { - // The port the rgw service will be listening on (http) - // +optional - Port int32 `json:"port,omitempty"` - - // The port the rgw service will be listening on (https) - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=65535 - // +nullable - // +optional - SecurePort int32 `json:"securePort,omitempty"` - - // The number of pods in the rgw replicaset. - // +nullable - // +optional - Instances int32 `json:"instances,omitempty"` - - // The name of the secret that stores the ssl certificate for secure rgw connections - // +nullable - // +optional - SSLCertificateRef string `json:"sslCertificateRef,omitempty"` - - // The name of the secret that stores custom ca-bundle with root and intermediate certificates. - // +nullable - // +optional - CaBundleRef string `json:"caBundleRef,omitempty"` - - // The affinity to place the rgw pods (default is to place on any available node) - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Placement Placement `json:"placement,omitempty"` - - // The annotations-related configuration to add/set on each Pod related object. - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Annotations rook.Annotations `json:"annotations,omitempty"` - - // The labels-related configuration to add/set on each Pod related object. 
- // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Labels rook.Labels `json:"labels,omitempty"` - - // The resource requirements for the rgw pods - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Resources v1.ResourceRequirements `json:"resources,omitempty"` - - // PriorityClassName sets priority classes on the rgw pods - // +optional - PriorityClassName string `json:"priorityClassName,omitempty"` - - // ExternalRgwEndpoints points to external rgw endpoint(s) - // +nullable - // +optional - ExternalRgwEndpoints []v1.EndpointAddress `json:"externalRgwEndpoints,omitempty"` - - // The configuration related to add/set on each rgw service. - // +optional - // +nullable - Service *RGWServiceSpec `json:"service,omitempty"` -} - -// ZoneSpec represents a Ceph Object Store Gateway Zone specification -type ZoneSpec struct { - // RGW Zone the Object Store is in - Name string `json:"name"` -} - -// ObjectStoreStatus represents the status of a Ceph Object Store resource -type ObjectStoreStatus struct { - // +optional - Phase ConditionType `json:"phase,omitempty"` - // +optional - Message string `json:"message,omitempty"` - // +optional - BucketStatus *BucketStatus `json:"bucketStatus,omitempty"` - // +optional - // +nullable - Info map[string]string `json:"info,omitempty"` - Conditions []Condition `json:"conditions,omitempty"` -} - -// BucketStatus represents the status of a bucket -type BucketStatus struct { - // +optional - Health ConditionType `json:"health,omitempty"` - // +optional - Details string `json:"details,omitempty"` - // +optional - LastChecked string `json:"lastChecked,omitempty"` - // +optional - LastChanged string `json:"lastChanged,omitempty"` -} - -// CephObjectStoreUser represents a Ceph Object Store Gateway User -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:resource:shortName=rcou;objectuser -// +kubebuilder:subresource:status -type CephObjectStoreUser struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ObjectStoreUserSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - Status *ObjectStoreUserStatus `json:"status,omitempty"` -} - -// ObjectStoreUserStatus represents the status Ceph Object Store Gateway User -type ObjectStoreUserStatus struct { - // +optional - Phase string `json:"phase,omitempty"` - // +optional - // +nullable - Info map[string]string `json:"info,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephObjectStoreUserList represents a list Ceph Object Store Gateway Users -type CephObjectStoreUserList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephObjectStoreUser `json:"items"` -} - -// ObjectStoreUserSpec represent the spec of an Objectstoreuser -type ObjectStoreUserSpec struct { - //The store the user will be created in - // +optional - Store string `json:"store,omitempty"` - //The display name for the ceph users - // +optional - DisplayName string `json:"displayName,omitempty"` -} - -// CephObjectRealm represents a Ceph Object Store Gateway Realm -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -type CephObjectRealm struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - // +nullable - // +optional - Spec ObjectRealmSpec `json:"spec,omitempty"` - 
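Pulling the object-store types above together, a hedged sketch of a small RGW deployment (two gateway instances on plain HTTP; every value is illustrative):

store := cephv1.ObjectStoreSpec{
	MetadataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 3}},
	DataPool:     cephv1.PoolSpec{ErasureCoded: cephv1.ErasureCodedSpec{DataChunks: 2, CodingChunks: 1}},
	Gateway: cephv1.GatewaySpec{
		Port:      80, // plain HTTP; SecurePort/SSLCertificateRef would be set for TLS
		Instances: 2,
	},
}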
// +kubebuilder:pruning:PreserveUnknownFields - // +optional - Status *Status `json:"status,omitempty"` -} - -// CephObjectRealmList represents a list Ceph Object Store Gateway Realms -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type CephObjectRealmList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephObjectRealm `json:"items"` -} - -// ObjectRealmSpec represent the spec of an ObjectRealm -type ObjectRealmSpec struct { - Pull PullSpec `json:"pull"` -} - -// PullSpec represents the pulling specification of a Ceph Object Storage Gateway Realm -type PullSpec struct { - Endpoint string `json:"endpoint"` -} - -// CephObjectZoneGroup represents a Ceph Object Store Gateway Zone Group -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -type CephObjectZoneGroup struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ObjectZoneGroupSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - Status *Status `json:"status,omitempty"` -} - -// CephObjectZoneGroupList represents a list Ceph Object Store Gateway Zone Groups -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type CephObjectZoneGroupList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephObjectZoneGroup `json:"items"` -} - -// ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup -type ObjectZoneGroupSpec struct { - //The display name for the ceph users - Realm string `json:"realm"` -} - -// CephObjectZone represents a Ceph Object Store Gateway Zone -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -type CephObjectZone struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ObjectZoneSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - Status *Status `json:"status,omitempty"` -} - -// CephObjectZoneList represents a list Ceph Object Store Gateway Zones -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type CephObjectZoneList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephObjectZone `json:"items"` -} - -// ObjectZoneSpec represent the spec of an ObjectZone -type ObjectZoneSpec struct { - //The display name for the ceph users - ZoneGroup string `json:"zoneGroup"` - - // The metadata pool settings - // +nullable - MetadataPool PoolSpec `json:"metadataPool"` - - // The data pool settings - // +nullable - DataPool PoolSpec `json:"dataPool"` -} - -// RGWServiceSpec represent the spec for RGW service -type RGWServiceSpec struct { - // The annotations-related configuration to add/set on each rgw service. 
- // nullable - // optional - Annotations rook.Annotations `json:"annotations,omitempty"` -} - -// CephNFS represents a Ceph NFS -// +genclient -// +genclient:noStatus -// +kubebuilder:resource:shortName=nfs,path=cephnfses -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -type CephNFS struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec NFSGaneshaSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - Status *Status `json:"status,omitempty"` -} - -// CephNFSList represents a list Ceph NFSes -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type CephNFSList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephNFS `json:"items"` -} - -// NFSGaneshaSpec represents the spec of an nfs ganesha server -type NFSGaneshaSpec struct { - // RADOS is the Ganesha RADOS specification - RADOS GaneshaRADOSSpec `json:"rados"` - - // Server is the Ganesha Server specification - Server GaneshaServerSpec `json:"server"` -} - -// GaneshaRADOSSpec represents the specification of a Ganesha RADOS object -type GaneshaRADOSSpec struct { - // Pool is the RADOS pool where NFS client recovery data is stored. - Pool string `json:"pool"` - - // Namespace is the RADOS namespace where NFS client recovery data is stored. - Namespace string `json:"namespace"` -} - -// GaneshaServerSpec represents the specification of a Ganesha Server -type GaneshaServerSpec struct { - // The number of active Ganesha servers - Active int `json:"active"` - - // The affinity to place the ganesha pods - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Placement Placement `json:"placement,omitempty"` - - // The annotations-related configuration to add/set on each Pod related object. - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Annotations rook.Annotations `json:"annotations,omitempty"` - - // The labels-related configuration to add/set on each Pod related object. - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Labels rook.Labels `json:"labels,omitempty"` - - // Resources set resource requests and limits - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Resources v1.ResourceRequirements `json:"resources,omitempty"` - - // PriorityClassName sets the priority class on the pods - // +optional - PriorityClassName string `json:"priorityClassName,omitempty"` - - // LogLevel set logging level - // +optional - LogLevel string `json:"logLevel,omitempty"` -} - -// NetworkSpec for Ceph includes backward compatibility code -type NetworkSpec struct { - // Provider is what provides network connectivity to the cluster e.g. "host" or "multus" - // +nullable - // +optional - Provider string `json:"provider,omitempty"` - - // Selectors string values describe what networks will be used to connect the cluster. - // Meanwhile the keys describe each network respective responsibilities or any metadata - // storage provider decide. 
- // +nullable - // +optional - Selectors map[string]string `json:"selectors,omitempty"` - - // HostNetwork to enable host network - // +optional - HostNetwork bool `json:"hostNetwork,omitempty"` - - // IPFamily is the single stack IPv6 or IPv4 protocol - // +kubebuilder:validation:Enum=IPv4;IPv6 - // +kubebuilder:default=IPv4 - // +nullable - // +optional - IPFamily IPFamilyType `json:"ipFamily,omitempty"` - - // DualStack determines whether Ceph daemons should listen on both IPv4 and IPv6 - // +optional - DualStack bool `json:"dualStack,omitempty"` -} - -// DisruptionManagementSpec configures management of daemon disruptions -type DisruptionManagementSpec struct { - // This enables management of poddisruptionbudgets - // +optional - ManagePodBudgets bool `json:"managePodBudgets,omitempty"` - - // OSDMaintenanceTimeout sets how many additional minutes the DOWN/OUT interval is for drained failure domains - // it only works if managePodBudgets is true. - // the default is 30 minutes - // +optional - OSDMaintenanceTimeout time.Duration `json:"osdMaintenanceTimeout,omitempty"` - - // PGHealthCheckTimeout is the time (in minutes) that the operator will wait for the placement groups to become - // healthy (active+clean) after a drain was completed and OSDs came back up. Rook will continue with the next drain - // if the timeout exceeds. It only works if managePodBudgets is true. - // No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. - // +optional - PGHealthCheckTimeout time.Duration `json:"pgHealthCheckTimeout,omitempty"` - - // This enables management of machinedisruptionbudgets - // +optional - ManageMachineDisruptionBudgets bool `json:"manageMachineDisruptionBudgets,omitempty"` - - // Namespace to look for MDBs by the machineDisruptionBudgetController - // +optional - MachineDisruptionBudgetNamespace string `json:"machineDisruptionBudgetNamespace,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephClient represents a Ceph Client -// +kubebuilder:subresource:status -type CephClient struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - // Spec represents the specification of a Ceph Client - Spec ClientSpec `json:"spec"` - // Status represents the status of a Ceph Client - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - Status *CephClientStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephClientList represents a list of Ceph Clients -type CephClientList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephClient `json:"items"` -} - -// ClientSpec represents the specification of a Ceph Client -type ClientSpec struct { - // +optional - Name string `json:"name,omitempty"` - // +kubebuilder:pruning:PreserveUnknownFields - Caps map[string]string `json:"caps"` -} - -// CephClientStatus represents the Status of Ceph Client -type CephClientStatus struct { - // +optional - Phase ConditionType `json:"phase,omitempty"` - // +optional - // +nullable - Info map[string]string `json:"info,omitempty"` -} - -// CleanupPolicySpec represents a Ceph Cluster cleanup policy -type CleanupPolicySpec struct { - // Confirmation represents the cleanup confirmation - // +optional - // +nullable - Confirmation CleanupConfirmationProperty `json:"confirmation,omitempty"` - // SanitizeDisks represents way we sanitize 
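The network and disruption-management knobs above might be set as in the following sketch; the provider value and timeout are illustrative, and the timeout unit follows the field's own doc comment.

net := cephv1.NetworkSpec{
	Provider:    "host", // illustrative; "multus" is the other provider named above
	HostNetwork: true,
	IPFamily:    cephv1.IPv4,
}
disruption := cephv1.DisruptionManagementSpec{
	ManagePodBudgets:      true,
	OSDMaintenanceTimeout: 30, // minutes, per the field's doc comment
}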
disks - // +optional - // +nullable - SanitizeDisks SanitizeDisksSpec `json:"sanitizeDisks,omitempty"` - // AllowUninstallWithVolumes defines whether we can proceed with the uninstall if they are RBD images still present - // +optional - AllowUninstallWithVolumes bool `json:"allowUninstallWithVolumes,omitempty"` -} - -// CleanupConfirmationProperty represents the cleanup confirmation -// +kubebuilder:validation:Pattern=`^$|^yes-really-destroy-data$` -type CleanupConfirmationProperty string - -// SanitizeDataSourceProperty represents a sanitizing data source -type SanitizeDataSourceProperty string - -// SanitizeMethodProperty represents a disk sanitizing method -type SanitizeMethodProperty string - -// SanitizeDisksSpec represents a disk sanitizing specification -type SanitizeDisksSpec struct { - // Method is the method we use to sanitize disks - // +optional - // +kubebuilder:validation:Enum=complete;quick - Method SanitizeMethodProperty `json:"method,omitempty"` - // DataSource is the data source to use to sanitize the disk with - // +optional - // +kubebuilder:validation:Enum=zero;random - DataSource SanitizeDataSourceProperty `json:"dataSource,omitempty"` - // Iteration is the number of pass to apply the sanitizing - // +optional - Iteration int32 `json:"iteration,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephRBDMirror represents a Ceph RBD Mirror -// +kubebuilder:subresource:status -type CephRBDMirror struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec RBDMirroringSpec `json:"spec"` - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - Status *Status `json:"status,omitempty"` -} - -// CephRBDMirrorList represents a list Ceph RBD Mirrors -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type CephRBDMirrorList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephRBDMirror `json:"items"` -} - -// RBDMirroringSpec represents the specification of an RBD mirror daemon -type RBDMirroringSpec struct { - // Count represents the number of rbd mirror instance to run - // +kubebuilder:validation:Minimum=1 - Count int `json:"count"` - - // Peers represents the peers spec - // +nullable - // +optional - Peers MirroringPeerSpec `json:"peers,omitempty"` - - // The affinity to place the rgw pods (default is to place on any available node) - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Placement Placement `json:"placement,omitempty"` - - // The annotations-related configuration to add/set on each Pod related object. - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Annotations rook.Annotations `json:"annotations,omitempty"` - - // The labels-related configuration to add/set on each Pod related object. 
- // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Labels rook.Labels `json:"labels,omitempty"` - - // The resource requirements for the rbd mirror pods - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Resources v1.ResourceRequirements `json:"resources,omitempty"` - - // PriorityClassName sets priority class on the rbd mirror pods - // +optional - PriorityClassName string `json:"priorityClassName,omitempty"` -} - -// MirroringPeerSpec represents the specification of a mirror peer -type MirroringPeerSpec struct { - // SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers - // +optional - SecretNames []string `json:"secretNames,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephFilesystemMirror is the Ceph Filesystem Mirror object definition -// +kubebuilder:subresource:status -type CephFilesystemMirror struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec FilesystemMirroringSpec `json:"spec"` - // +optional - Status *Status `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CephFilesystemMirrorList is a list of CephFilesystemMirror -type CephFilesystemMirrorList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []CephFilesystemMirror `json:"items"` -} - -// FilesystemMirroringSpec is the filesystem mirroring specification -type FilesystemMirroringSpec struct { - // The affinity to place the rgw pods (default is to place on any available node) - // +nullable - // +optional - Placement Placement `json:"placement,omitempty"` - - // The annotations-related configuration to add/set on each Pod related object. - // +nullable - // +optional - Annotations rook.Annotations `json:"annotations,omitempty"` - - // The labels-related configuration to add/set on each Pod related object. - // +nullable - // +optional - Labels rook.Labels `json:"labels,omitempty"` - - // The resource requirements for the cephfs-mirror pods - // +nullable - // +optional - Resources v1.ResourceRequirements `json:"resources,omitempty"` - - // PriorityClassName sets priority class on the cephfs-mirror pods - // +optional - PriorityClassName string `json:"priorityClassName,omitempty"` -} - -// IPFamilyType represents the single stack Ipv4 or Ipv6 protocol. 
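A minimal sketch of the RBD mirroring spec above, assuming the peer bootstrap token has already been stored in a Kubernetes Secret (the Secret name is made up):

rbdMirror := cephv1.RBDMirroringSpec{
	Count: 1,
	Peers: cephv1.MirroringPeerSpec{
		SecretNames: []string{"rbd-mirror-peer-token"}, // illustrative Secret name
	},
}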
-type IPFamilyType string - -const ( - // IPv6 internet protocol version - IPv6 IPFamilyType = "IPv6" - // IPv4 internet protocol version - IPv4 IPFamilyType = "IPv4" -) - -type StorageScopeSpec struct { - // +nullable - // +optional - Nodes []Node `json:"nodes,omitempty"` - // +optional - UseAllNodes bool `json:"useAllNodes,omitempty"` - // +optional - OnlyApplyOSDPlacement bool `json:"onlyApplyOSDPlacement,omitempty"` - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Config map[string]string `json:"config,omitempty"` - Selection `json:",inline"` - // +nullable - // +optional - StorageClassDeviceSets []StorageClassDeviceSet `json:"storageClassDeviceSets,omitempty"` -} - -// Node is a storage nodes -// +nullable -type Node struct { - // +optional - Name string `json:"name,omitempty"` - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Resources v1.ResourceRequirements `json:"resources,omitempty"` - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Config map[string]string `json:"config,omitempty"` - Selection `json:",inline"` -} - -// Device represents a disk to use in the cluster -type Device struct { - // +optional - Name string `json:"name,omitempty"` - // +optional - FullPath string `json:"fullpath,omitempty"` - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Config map[string]string `json:"config,omitempty"` -} - -type Selection struct { - // Whether to consume all the storage devices found on a machine - // +optional - UseAllDevices *bool `json:"useAllDevices,omitempty"` - // A regular expression to allow more fine-grained selection of devices on nodes across the cluster - // +optional - DeviceFilter string `json:"deviceFilter,omitempty"` - // A regular expression to allow more fine-grained selection of devices with path names - // +optional - DevicePathFilter string `json:"devicePathFilter,omitempty"` - // List of devices to use as storage devices - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Devices []Device `json:"devices,omitempty"` - // PersistentVolumeClaims to use as storage - // +optional - VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` -} - -// PlacementSpec is the placement for core ceph daemons part of the CephCluster CRD -type PlacementSpec map[rook.KeyType]Placement - -// Placement is the placement for an object -type Placement struct { - // NodeAffinity is a group of node affinity scheduling rules - // +optional - NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` - // PodAffinity is a group of inter pod affinity scheduling rules - // +optional - PodAffinity *v1.PodAffinity `json:"podAffinity,omitempty"` - // PodAntiAffinity is a group of inter pod anti affinity scheduling rules - // +optional - PodAntiAffinity *v1.PodAntiAffinity `json:"podAntiAffinity,omitempty"` - // The pod this Toleration is attached to tolerates any taint that matches - // the triple using the matching operator - // +optional - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - // TopologySpreadConstraint specifies how to spread matching pods among the given topology - // +optional - TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` -} - -// ResourceSpec is a collection of ResourceRequirements that describes the compute resource requirements -type ResourceSpec map[string]v1.ResourceRequirements - -// ProbeSpec is a wrapper around Probe so 
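The storage selection types above (StorageScopeSpec, Node, and the embedded Selection; StorageClassDeviceSet for PVC-backed OSDs follows just below) compose roughly as in this sketch, with the node name and device filter purely illustrative:

useAllDevices := false
storage := cephv1.StorageScopeSpec{
	UseAllNodes: false,
	Nodes: []cephv1.Node{{
		Name: "worker-0", // illustrative node name
		Selection: cephv1.Selection{
			UseAllDevices: &useAllDevices,
			DeviceFilter:  "^sd[b-d]", // illustrative regex for the data disks
		},
	}},
}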
it can be enabled or disabled for a Ceph daemon -type ProbeSpec struct { - // Disabled determines whether probe is disable or not - // +optional - Disabled bool `json:"disabled,omitempty"` - // Probe describes a health check to be performed against a container to determine whether it is - // alive or ready to receive traffic. - // +optional - Probe *v1.Probe `json:"probe,omitempty"` -} - -// PriorityClassNamesSpec is a map of priority class names to be assigned to components -type PriorityClassNamesSpec map[rook.KeyType]string - -// StorageClassDeviceSet is a storage class device set -// +nullable -type StorageClassDeviceSet struct { - // Name is a unique identifier for the set - Name string `json:"name"` - // Count is the number of devices in this set - // +kubebuilder:validation:Minimum=1 - Count int `json:"count"` - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Resources v1.ResourceRequirements `json:"resources,omitempty"` // Requests/limits for the devices - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Placement Placement `json:"placement,omitempty"` // Placement constraints for the device daemons - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - PreparePlacement *Placement `json:"preparePlacement,omitempty"` // Placement constraints for the device preparation - // Provider-specific device configuration - // +kubebuilder:pruning:PreserveUnknownFields - // +nullable - // +optional - Config map[string]string `json:"config,omitempty"` - // VolumeClaimTemplates is a list of PVC templates for the underlying storage devices - VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates"` - // Portable represents OSD portability across the hosts - // +optional - Portable bool `json:"portable,omitempty"` - // TuneSlowDeviceClass Tune the OSD when running on a slow Device Class - // +optional - TuneSlowDeviceClass bool `json:"tuneDeviceClass,omitempty"` - // TuneFastDeviceClass Tune the OSD when running on a fast Device Class - // +optional - TuneFastDeviceClass bool `json:"tuneFastDeviceClass,omitempty"` - // Scheduler name for OSD pod placement - // +optional - SchedulerName string `json:"schedulerName,omitempty"` - // Whether to encrypt the deviceSet - // +optional - Encrypted bool `json:"encrypted,omitempty"` -} diff --git a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go deleted file mode 100644 index d52c33b31..000000000 --- a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,3125 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. 
- -package v1 - -import ( - rookio "github.com/rook/rook/pkg/apis/rook.io" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in AnnotationsSpec) DeepCopyInto(out *AnnotationsSpec) { - { - in := &in - *out = make(AnnotationsSpec, len(*in)) - for key, val := range *in { - var outVal map[string]string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - (*out)[key] = outVal - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnnotationsSpec. -func (in AnnotationsSpec) DeepCopy() AnnotationsSpec { - if in == nil { - return nil - } - out := new(AnnotationsSpec) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BucketHealthCheckSpec) DeepCopyInto(out *BucketHealthCheckSpec) { - *out = *in - in.Bucket.DeepCopyInto(&out.Bucket) - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(ProbeSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketHealthCheckSpec. -func (in *BucketHealthCheckSpec) DeepCopy() *BucketHealthCheckSpec { - if in == nil { - return nil - } - out := new(BucketHealthCheckSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. -func (in *BucketStatus) DeepCopy() *BucketStatus { - if in == nil { - return nil - } - out := new(BucketStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Capacity) DeepCopyInto(out *Capacity) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capacity. -func (in *Capacity) DeepCopy() *Capacity { - if in == nil { - return nil - } - out := new(Capacity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephBlockPool) DeepCopyInto(out *CephBlockPool) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(CephBlockPoolStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPool. -func (in *CephBlockPool) DeepCopy() *CephBlockPool { - if in == nil { - return nil - } - out := new(CephBlockPool) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *CephBlockPool) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephBlockPoolList) DeepCopyInto(out *CephBlockPoolList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephBlockPool, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolList. -func (in *CephBlockPoolList) DeepCopy() *CephBlockPoolList { - if in == nil { - return nil - } - out := new(CephBlockPoolList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephBlockPoolList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephBlockPoolStatus) DeepCopyInto(out *CephBlockPoolStatus) { - *out = *in - if in.MirroringStatus != nil { - in, out := &in.MirroringStatus, &out.MirroringStatus - *out = new(MirroringStatusSpec) - (*in).DeepCopyInto(*out) - } - if in.MirroringInfo != nil { - in, out := &in.MirroringInfo, &out.MirroringInfo - *out = new(MirroringInfoSpec) - (*in).DeepCopyInto(*out) - } - if in.SnapshotScheduleStatus != nil { - in, out := &in.SnapshotScheduleStatus, &out.SnapshotScheduleStatus - *out = new(SnapshotScheduleStatusSpec) - (*in).DeepCopyInto(*out) - } - if in.Info != nil { - in, out := &in.Info, &out.Info - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolStatus. -func (in *CephBlockPoolStatus) DeepCopy() *CephBlockPoolStatus { - if in == nil { - return nil - } - out := new(CephBlockPoolStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephClient) DeepCopyInto(out *CephClient) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(CephClientStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClient. -func (in *CephClient) DeepCopy() *CephClient { - if in == nil { - return nil - } - out := new(CephClient) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephClient) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CephClientList) DeepCopyInto(out *CephClientList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephClient, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClientList. -func (in *CephClientList) DeepCopy() *CephClientList { - if in == nil { - return nil - } - out := new(CephClientList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephClientList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephClientStatus) DeepCopyInto(out *CephClientStatus) { - *out = *in - if in.Info != nil { - in, out := &in.Info, &out.Info - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClientStatus. -func (in *CephClientStatus) DeepCopy() *CephClientStatus { - if in == nil { - return nil - } - out := new(CephClientStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephCluster) DeepCopyInto(out *CephCluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephCluster. -func (in *CephCluster) DeepCopy() *CephCluster { - if in == nil { - return nil - } - out := new(CephCluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephCluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephClusterHealthCheckSpec) DeepCopyInto(out *CephClusterHealthCheckSpec) { - *out = *in - in.DaemonHealth.DeepCopyInto(&out.DaemonHealth) - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = make(map[rookio.KeyType]*ProbeSpec, len(*in)) - for key, val := range *in { - var outVal *ProbeSpec - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(ProbeSpec) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClusterHealthCheckSpec. -func (in *CephClusterHealthCheckSpec) DeepCopy() *CephClusterHealthCheckSpec { - if in == nil { - return nil - } - out := new(CephClusterHealthCheckSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CephClusterList) DeepCopyInto(out *CephClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephCluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClusterList. -func (in *CephClusterList) DeepCopy() *CephClusterList { - if in == nil { - return nil - } - out := new(CephClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephDaemonsVersions) DeepCopyInto(out *CephDaemonsVersions) { - *out = *in - if in.Mon != nil { - in, out := &in.Mon, &out.Mon - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Mgr != nil { - in, out := &in.Mgr, &out.Mgr - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Osd != nil { - in, out := &in.Osd, &out.Osd - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Rgw != nil { - in, out := &in.Rgw, &out.Rgw - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Mds != nil { - in, out := &in.Mds, &out.Mds - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.RbdMirror != nil { - in, out := &in.RbdMirror, &out.RbdMirror - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.CephFSMirror != nil { - in, out := &in.CephFSMirror, &out.CephFSMirror - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Overall != nil { - in, out := &in.Overall, &out.Overall - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephDaemonsVersions. -func (in *CephDaemonsVersions) DeepCopy() *CephDaemonsVersions { - if in == nil { - return nil - } - out := new(CephDaemonsVersions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFilesystem) DeepCopyInto(out *CephFilesystem) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(CephFilesystemStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystem. -func (in *CephFilesystem) DeepCopy() *CephFilesystem { - if in == nil { - return nil - } - out := new(CephFilesystem) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *CephFilesystem) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFilesystemList) DeepCopyInto(out *CephFilesystemList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephFilesystem, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemList. -func (in *CephFilesystemList) DeepCopy() *CephFilesystemList { - if in == nil { - return nil - } - out := new(CephFilesystemList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephFilesystemList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFilesystemMirror) DeepCopyInto(out *CephFilesystemMirror) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(Status) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemMirror. -func (in *CephFilesystemMirror) DeepCopy() *CephFilesystemMirror { - if in == nil { - return nil - } - out := new(CephFilesystemMirror) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephFilesystemMirror) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFilesystemMirrorList) DeepCopyInto(out *CephFilesystemMirrorList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephFilesystemMirror, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemMirrorList. -func (in *CephFilesystemMirrorList) DeepCopy() *CephFilesystemMirrorList { - if in == nil { - return nil - } - out := new(CephFilesystemMirrorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephFilesystemMirrorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CephFilesystemStatus) DeepCopyInto(out *CephFilesystemStatus) { - *out = *in - if in.SnapshotScheduleStatus != nil { - in, out := &in.SnapshotScheduleStatus, &out.SnapshotScheduleStatus - *out = new(FilesystemSnapshotScheduleStatusSpec) - (*in).DeepCopyInto(*out) - } - if in.Info != nil { - in, out := &in.Info, &out.Info - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.MirroringStatus != nil { - in, out := &in.MirroringStatus, &out.MirroringStatus - *out = new(FilesystemMirroringInfoSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemStatus. -func (in *CephFilesystemStatus) DeepCopy() *CephFilesystemStatus { - if in == nil { - return nil - } - out := new(CephFilesystemStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephHealthMessage) DeepCopyInto(out *CephHealthMessage) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephHealthMessage. -func (in *CephHealthMessage) DeepCopy() *CephHealthMessage { - if in == nil { - return nil - } - out := new(CephHealthMessage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephNFS) DeepCopyInto(out *CephNFS) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(Status) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephNFS. -func (in *CephNFS) DeepCopy() *CephNFS { - if in == nil { - return nil - } - out := new(CephNFS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephNFS) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephNFSList) DeepCopyInto(out *CephNFSList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephNFS, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephNFSList. -func (in *CephNFSList) DeepCopy() *CephNFSList { - if in == nil { - return nil - } - out := new(CephNFSList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephNFSList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CephObjectRealm) DeepCopyInto(out *CephObjectRealm) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(Status) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectRealm. -func (in *CephObjectRealm) DeepCopy() *CephObjectRealm { - if in == nil { - return nil - } - out := new(CephObjectRealm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectRealm) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephObjectRealmList) DeepCopyInto(out *CephObjectRealmList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephObjectRealm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectRealmList. -func (in *CephObjectRealmList) DeepCopy() *CephObjectRealmList { - if in == nil { - return nil - } - out := new(CephObjectRealmList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectRealmList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephObjectStore) DeepCopyInto(out *CephObjectStore) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(ObjectStoreStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectStore. -func (in *CephObjectStore) DeepCopy() *CephObjectStore { - if in == nil { - return nil - } - out := new(CephObjectStore) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectStore) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephObjectStoreList) DeepCopyInto(out *CephObjectStoreList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephObjectStore, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectStoreList. 
-func (in *CephObjectStoreList) DeepCopy() *CephObjectStoreList { - if in == nil { - return nil - } - out := new(CephObjectStoreList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectStoreList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephObjectStoreUser) DeepCopyInto(out *CephObjectStoreUser) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(ObjectStoreUserStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectStoreUser. -func (in *CephObjectStoreUser) DeepCopy() *CephObjectStoreUser { - if in == nil { - return nil - } - out := new(CephObjectStoreUser) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectStoreUser) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephObjectStoreUserList) DeepCopyInto(out *CephObjectStoreUserList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephObjectStoreUser, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectStoreUserList. -func (in *CephObjectStoreUserList) DeepCopy() *CephObjectStoreUserList { - if in == nil { - return nil - } - out := new(CephObjectStoreUserList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectStoreUserList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephObjectZone) DeepCopyInto(out *CephObjectZone) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(Status) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectZone. -func (in *CephObjectZone) DeepCopy() *CephObjectZone { - if in == nil { - return nil - } - out := new(CephObjectZone) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectZone) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CephObjectZoneGroup) DeepCopyInto(out *CephObjectZoneGroup) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(Status) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectZoneGroup. -func (in *CephObjectZoneGroup) DeepCopy() *CephObjectZoneGroup { - if in == nil { - return nil - } - out := new(CephObjectZoneGroup) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectZoneGroup) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephObjectZoneGroupList) DeepCopyInto(out *CephObjectZoneGroupList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephObjectZoneGroup, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectZoneGroupList. -func (in *CephObjectZoneGroupList) DeepCopy() *CephObjectZoneGroupList { - if in == nil { - return nil - } - out := new(CephObjectZoneGroupList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectZoneGroupList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephObjectZoneList) DeepCopyInto(out *CephObjectZoneList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephObjectZone, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectZoneList. -func (in *CephObjectZoneList) DeepCopy() *CephObjectZoneList { - if in == nil { - return nil - } - out := new(CephObjectZoneList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephObjectZoneList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephRBDMirror) DeepCopyInto(out *CephRBDMirror) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(Status) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephRBDMirror. 
-func (in *CephRBDMirror) DeepCopy() *CephRBDMirror { - if in == nil { - return nil - } - out := new(CephRBDMirror) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephRBDMirror) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephRBDMirrorList) DeepCopyInto(out *CephRBDMirrorList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CephRBDMirror, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephRBDMirrorList. -func (in *CephRBDMirrorList) DeepCopy() *CephRBDMirrorList { - if in == nil { - return nil - } - out := new(CephRBDMirrorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephRBDMirrorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephStatus) DeepCopyInto(out *CephStatus) { - *out = *in - if in.Details != nil { - in, out := &in.Details, &out.Details - *out = make(map[string]CephHealthMessage, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.Capacity = in.Capacity - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = new(CephDaemonsVersions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephStatus. -func (in *CephStatus) DeepCopy() *CephStatus { - if in == nil { - return nil - } - out := new(CephStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephStorage) DeepCopyInto(out *CephStorage) { - *out = *in - if in.DeviceClasses != nil { - in, out := &in.DeviceClasses, &out.DeviceClasses - *out = make([]DeviceClasses, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephStorage. -func (in *CephStorage) DeepCopy() *CephStorage { - if in == nil { - return nil - } - out := new(CephStorage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephVersionSpec) DeepCopyInto(out *CephVersionSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephVersionSpec. -func (in *CephVersionSpec) DeepCopy() *CephVersionSpec { - if in == nil { - return nil - } - out := new(CephVersionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CleanupPolicySpec) DeepCopyInto(out *CleanupPolicySpec) { - *out = *in - out.SanitizeDisks = in.SanitizeDisks - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupPolicySpec. -func (in *CleanupPolicySpec) DeepCopy() *CleanupPolicySpec { - if in == nil { - return nil - } - out := new(CleanupPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientSpec) DeepCopyInto(out *ClientSpec) { - *out = *in - if in.Caps != nil { - in, out := &in.Caps, &out.Caps - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientSpec. -func (in *ClientSpec) DeepCopy() *ClientSpec { - if in == nil { - return nil - } - out := new(ClientSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { - *out = *in - out.CephVersion = in.CephVersion - in.Storage.DeepCopyInto(&out.Storage) - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(AnnotationsSpec, len(*in)) - for key, val := range *in { - var outVal map[string]string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - (*out)[key] = outVal - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(LabelsSpec, len(*in)) - for key, val := range *in { - var outVal map[string]string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(rookio.Labels, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - (*out)[key] = outVal - } - } - if in.Placement != nil { - in, out := &in.Placement, &out.Placement - *out = make(PlacementSpec, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - in.Network.DeepCopyInto(&out.Network) - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make(ResourceSpec, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.PriorityClassNames != nil { - in, out := &in.PriorityClassNames, &out.PriorityClassNames - *out = make(PriorityClassNamesSpec, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.DisruptionManagement = in.DisruptionManagement - in.Mon.DeepCopyInto(&out.Mon) - out.CrashCollector = in.CrashCollector - out.Dashboard = in.Dashboard - in.Monitoring.DeepCopyInto(&out.Monitoring) - out.External = in.External - in.Mgr.DeepCopyInto(&out.Mgr) - out.CleanupPolicy = in.CleanupPolicy - in.HealthCheck.DeepCopyInto(&out.HealthCheck) - in.Security.DeepCopyInto(&out.Security) - out.LogCollector = in.LogCollector - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. -func (in *ClusterSpec) DeepCopy() *ClusterSpec { - if in == nil { - return nil - } - out := new(ClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CephStatus != nil { - in, out := &in.CephStatus, &out.CephStatus - *out = new(CephStatus) - (*in).DeepCopyInto(*out) - } - if in.CephStorage != nil { - in, out := &in.CephStorage, &out.CephStorage - *out = new(CephStorage) - (*in).DeepCopyInto(*out) - } - if in.CephVersion != nil { - in, out := &in.CephVersion, &out.CephVersion - *out = new(ClusterVersion) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. -func (in *ClusterStatus) DeepCopy() *ClusterStatus { - if in == nil { - return nil - } - out := new(ClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion. -func (in *ClusterVersion) DeepCopy() *ClusterVersion { - if in == nil { - return nil - } - out := new(ClusterVersion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Condition) DeepCopyInto(out *Condition) { - *out = *in - in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. -func (in *Condition) DeepCopy() *Condition { - if in == nil { - return nil - } - out := new(Condition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CrashCollectorSpec) DeepCopyInto(out *CrashCollectorSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrashCollectorSpec. -func (in *CrashCollectorSpec) DeepCopy() *CrashCollectorSpec { - if in == nil { - return nil - } - out := new(CrashCollectorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonHealthSpec) DeepCopyInto(out *DaemonHealthSpec) { - *out = *in - in.Status.DeepCopyInto(&out.Status) - in.Monitor.DeepCopyInto(&out.Monitor) - in.ObjectStorageDaemon.DeepCopyInto(&out.ObjectStorageDaemon) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonHealthSpec. -func (in *DaemonHealthSpec) DeepCopy() *DaemonHealthSpec { - if in == nil { - return nil - } - out := new(DaemonHealthSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DashboardSpec) DeepCopyInto(out *DashboardSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardSpec. 
-func (in *DashboardSpec) DeepCopy() *DashboardSpec { - if in == nil { - return nil - } - out := new(DashboardSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Device) DeepCopyInto(out *Device) { - *out = *in - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. -func (in *Device) DeepCopy() *Device { - if in == nil { - return nil - } - out := new(Device) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeviceClasses) DeepCopyInto(out *DeviceClasses) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClasses. -func (in *DeviceClasses) DeepCopy() *DeviceClasses { - if in == nil { - return nil - } - out := new(DeviceClasses) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DisruptionManagementSpec) DeepCopyInto(out *DisruptionManagementSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisruptionManagementSpec. -func (in *DisruptionManagementSpec) DeepCopy() *DisruptionManagementSpec { - if in == nil { - return nil - } - out := new(DisruptionManagementSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ErasureCodedSpec) DeepCopyInto(out *ErasureCodedSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErasureCodedSpec. -func (in *ErasureCodedSpec) DeepCopy() *ErasureCodedSpec { - if in == nil { - return nil - } - out := new(ErasureCodedSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalSpec) DeepCopyInto(out *ExternalSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSpec. -func (in *ExternalSpec) DeepCopy() *ExternalSpec { - if in == nil { - return nil - } - out := new(ExternalSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSMirroringSpec) DeepCopyInto(out *FSMirroringSpec) { - *out = *in - if in.Peers != nil { - in, out := &in.Peers, &out.Peers - *out = new(MirroringPeerSpec) - (*in).DeepCopyInto(*out) - } - if in.SnapshotSchedules != nil { - in, out := &in.SnapshotSchedules, &out.SnapshotSchedules - *out = make([]SnapshotScheduleSpec, len(*in)) - copy(*out, *in) - } - if in.SnapshotRetention != nil { - in, out := &in.SnapshotRetention, &out.SnapshotRetention - *out = make([]SnapshotScheduleRetentionSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSMirroringSpec. 
-func (in *FSMirroringSpec) DeepCopy() *FSMirroringSpec { - if in == nil { - return nil - } - out := new(FSMirroringSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FilesystemMirrorInfoPeerSpec) DeepCopyInto(out *FilesystemMirrorInfoPeerSpec) { - *out = *in - if in.Remote != nil { - in, out := &in.Remote, &out.Remote - *out = new(PeerRemoteSpec) - **out = **in - } - if in.Stats != nil { - in, out := &in.Stats, &out.Stats - *out = new(PeerStatSpec) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemMirrorInfoPeerSpec. -func (in *FilesystemMirrorInfoPeerSpec) DeepCopy() *FilesystemMirrorInfoPeerSpec { - if in == nil { - return nil - } - out := new(FilesystemMirrorInfoPeerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FilesystemMirroringInfo) DeepCopyInto(out *FilesystemMirroringInfo) { - *out = *in - if in.Filesystems != nil { - in, out := &in.Filesystems, &out.Filesystems - *out = make([]FilesystemsSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemMirroringInfo. -func (in *FilesystemMirroringInfo) DeepCopy() *FilesystemMirroringInfo { - if in == nil { - return nil - } - out := new(FilesystemMirroringInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FilesystemMirroringInfoSpec) DeepCopyInto(out *FilesystemMirroringInfoSpec) { - *out = *in - if in.FilesystemMirroringAllInfo != nil { - in, out := &in.FilesystemMirroringAllInfo, &out.FilesystemMirroringAllInfo - *out = make([]FilesystemMirroringInfo, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemMirroringInfoSpec. -func (in *FilesystemMirroringInfoSpec) DeepCopy() *FilesystemMirroringInfoSpec { - if in == nil { - return nil - } - out := new(FilesystemMirroringInfoSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FilesystemMirroringSpec) DeepCopyInto(out *FilesystemMirroringSpec) { - *out = *in - in.Placement.DeepCopyInto(&out.Placement) - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(rookio.Labels, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Resources.DeepCopyInto(&out.Resources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemMirroringSpec. -func (in *FilesystemMirroringSpec) DeepCopy() *FilesystemMirroringSpec { - if in == nil { - return nil - } - out := new(FilesystemMirroringSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FilesystemSnapshotScheduleStatusRetention) DeepCopyInto(out *FilesystemSnapshotScheduleStatusRetention) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSnapshotScheduleStatusRetention. -func (in *FilesystemSnapshotScheduleStatusRetention) DeepCopy() *FilesystemSnapshotScheduleStatusRetention { - if in == nil { - return nil - } - out := new(FilesystemSnapshotScheduleStatusRetention) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FilesystemSnapshotScheduleStatusSpec) DeepCopyInto(out *FilesystemSnapshotScheduleStatusSpec) { - *out = *in - if in.SnapshotSchedules != nil { - in, out := &in.SnapshotSchedules, &out.SnapshotSchedules - *out = make([]FilesystemSnapshotSchedulesSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSnapshotScheduleStatusSpec. -func (in *FilesystemSnapshotScheduleStatusSpec) DeepCopy() *FilesystemSnapshotScheduleStatusSpec { - if in == nil { - return nil - } - out := new(FilesystemSnapshotScheduleStatusSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FilesystemSnapshotSchedulesSpec) DeepCopyInto(out *FilesystemSnapshotSchedulesSpec) { - *out = *in - out.Retention = in.Retention - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSnapshotSchedulesSpec. -func (in *FilesystemSnapshotSchedulesSpec) DeepCopy() *FilesystemSnapshotSchedulesSpec { - if in == nil { - return nil - } - out := new(FilesystemSnapshotSchedulesSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FilesystemSpec) DeepCopyInto(out *FilesystemSpec) { - *out = *in - in.MetadataPool.DeepCopyInto(&out.MetadataPool) - if in.DataPools != nil { - in, out := &in.DataPools, &out.DataPools - *out = make([]PoolSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.MetadataServer.DeepCopyInto(&out.MetadataServer) - if in.Mirroring != nil { - in, out := &in.Mirroring, &out.Mirroring - *out = new(FSMirroringSpec) - (*in).DeepCopyInto(*out) - } - in.StatusCheck.DeepCopyInto(&out.StatusCheck) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSpec. -func (in *FilesystemSpec) DeepCopy() *FilesystemSpec { - if in == nil { - return nil - } - out := new(FilesystemSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FilesystemsSpec) DeepCopyInto(out *FilesystemsSpec) { - *out = *in - if in.Peers != nil { - in, out := &in.Peers, &out.Peers - *out = make([]FilesystemMirrorInfoPeerSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemsSpec. 
-func (in *FilesystemsSpec) DeepCopy() *FilesystemsSpec { - if in == nil { - return nil - } - out := new(FilesystemsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GaneshaRADOSSpec) DeepCopyInto(out *GaneshaRADOSSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GaneshaRADOSSpec. -func (in *GaneshaRADOSSpec) DeepCopy() *GaneshaRADOSSpec { - if in == nil { - return nil - } - out := new(GaneshaRADOSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GaneshaServerSpec) DeepCopyInto(out *GaneshaServerSpec) { - *out = *in - in.Placement.DeepCopyInto(&out.Placement) - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(rookio.Labels, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Resources.DeepCopyInto(&out.Resources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GaneshaServerSpec. -func (in *GaneshaServerSpec) DeepCopy() *GaneshaServerSpec { - if in == nil { - return nil - } - out := new(GaneshaServerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { - *out = *in - in.Placement.DeepCopyInto(&out.Placement) - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(rookio.Labels, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.ExternalRgwEndpoints != nil { - in, out := &in.ExternalRgwEndpoints, &out.ExternalRgwEndpoints - *out = make([]corev1.EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(RGWServiceSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. -func (in *GatewaySpec) DeepCopy() *GatewaySpec { - if in == nil { - return nil - } - out := new(GatewaySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HealthCheckSpec) DeepCopyInto(out *HealthCheckSpec) { - *out = *in - if in.Interval != nil { - in, out := &in.Interval, &out.Interval - *out = new(metav1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckSpec. -func (in *HealthCheckSpec) DeepCopy() *HealthCheckSpec { - if in == nil { - return nil - } - out := new(HealthCheckSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HybridStorageSpec) DeepCopyInto(out *HybridStorageSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridStorageSpec. -func (in *HybridStorageSpec) DeepCopy() *HybridStorageSpec { - if in == nil { - return nil - } - out := new(HybridStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KeyManagementServiceSpec) DeepCopyInto(out *KeyManagementServiceSpec) { - *out = *in - if in.ConnectionDetails != nil { - in, out := &in.ConnectionDetails, &out.ConnectionDetails - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyManagementServiceSpec. -func (in *KeyManagementServiceSpec) DeepCopy() *KeyManagementServiceSpec { - if in == nil { - return nil - } - out := new(KeyManagementServiceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in LabelsSpec) DeepCopyInto(out *LabelsSpec) { - { - in := &in - *out = make(LabelsSpec, len(*in)) - for key, val := range *in { - var outVal map[string]string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(rookio.Labels, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - (*out)[key] = outVal - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsSpec. -func (in LabelsSpec) DeepCopy() LabelsSpec { - if in == nil { - return nil - } - out := new(LabelsSpec) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogCollectorSpec) DeepCopyInto(out *LogCollectorSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogCollectorSpec. -func (in *LogCollectorSpec) DeepCopy() *LogCollectorSpec { - if in == nil { - return nil - } - out := new(LogCollectorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetadataServerSpec) DeepCopyInto(out *MetadataServerSpec) { - *out = *in - in.Placement.DeepCopyInto(&out.Placement) - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(rookio.Labels, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Resources.DeepCopyInto(&out.Resources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataServerSpec. -func (in *MetadataServerSpec) DeepCopy() *MetadataServerSpec { - if in == nil { - return nil - } - out := new(MetadataServerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MgrSpec) DeepCopyInto(out *MgrSpec) { - *out = *in - if in.Modules != nil { - in, out := &in.Modules, &out.Modules - *out = make([]Module, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MgrSpec. -func (in *MgrSpec) DeepCopy() *MgrSpec { - if in == nil { - return nil - } - out := new(MgrSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MirrorHealthCheckSpec) DeepCopyInto(out *MirrorHealthCheckSpec) { - *out = *in - in.Mirror.DeepCopyInto(&out.Mirror) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirrorHealthCheckSpec. -func (in *MirrorHealthCheckSpec) DeepCopy() *MirrorHealthCheckSpec { - if in == nil { - return nil - } - out := new(MirrorHealthCheckSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MirroringInfoSpec) DeepCopyInto(out *MirroringInfoSpec) { - *out = *in - if in.PoolMirroringInfo != nil { - in, out := &in.PoolMirroringInfo, &out.PoolMirroringInfo - *out = new(PoolMirroringInfo) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringInfoSpec. -func (in *MirroringInfoSpec) DeepCopy() *MirroringInfoSpec { - if in == nil { - return nil - } - out := new(MirroringInfoSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MirroringPeerSpec) DeepCopyInto(out *MirroringPeerSpec) { - *out = *in - if in.SecretNames != nil { - in, out := &in.SecretNames, &out.SecretNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringPeerSpec. -func (in *MirroringPeerSpec) DeepCopy() *MirroringPeerSpec { - if in == nil { - return nil - } - out := new(MirroringPeerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MirroringSpec) DeepCopyInto(out *MirroringSpec) { - *out = *in - if in.SnapshotSchedules != nil { - in, out := &in.SnapshotSchedules, &out.SnapshotSchedules - *out = make([]SnapshotScheduleSpec, len(*in)) - copy(*out, *in) - } - if in.Peers != nil { - in, out := &in.Peers, &out.Peers - *out = new(MirroringPeerSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringSpec. -func (in *MirroringSpec) DeepCopy() *MirroringSpec { - if in == nil { - return nil - } - out := new(MirroringSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MirroringStatusSpec) DeepCopyInto(out *MirroringStatusSpec) { - *out = *in - in.PoolMirroringStatus.DeepCopyInto(&out.PoolMirroringStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringStatusSpec. 
-func (in *MirroringStatusSpec) DeepCopy() *MirroringStatusSpec { - if in == nil { - return nil - } - out := new(MirroringStatusSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Module) DeepCopyInto(out *Module) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Module. -func (in *Module) DeepCopy() *Module { - if in == nil { - return nil - } - out := new(Module) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonSpec) DeepCopyInto(out *MonSpec) { - *out = *in - if in.StretchCluster != nil { - in, out := &in.StretchCluster, &out.StretchCluster - *out = new(StretchClusterSpec) - (*in).DeepCopyInto(*out) - } - if in.VolumeClaimTemplate != nil { - in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate - *out = new(corev1.PersistentVolumeClaim) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonSpec. -func (in *MonSpec) DeepCopy() *MonSpec { - if in == nil { - return nil - } - out := new(MonSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { - *out = *in - if in.ExternalMgrEndpoints != nil { - in, out := &in.ExternalMgrEndpoints, &out.ExternalMgrEndpoints - *out = make([]corev1.EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. -func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { - if in == nil { - return nil - } - out := new(MonitoringSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSGaneshaSpec) DeepCopyInto(out *NFSGaneshaSpec) { - *out = *in - out.RADOS = in.RADOS - in.Server.DeepCopyInto(&out.Server) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSGaneshaSpec. -func (in *NFSGaneshaSpec) DeepCopy() *NFSGaneshaSpec { - if in == nil { - return nil - } - out := new(NFSGaneshaSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { - *out = *in - if in.Selectors != nil { - in, out := &in.Selectors, &out.Selectors - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. -func (in *NetworkSpec) DeepCopy() *NetworkSpec { - if in == nil { - return nil - } - out := new(NetworkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Node) DeepCopyInto(out *Node) { - *out = *in - in.Resources.DeepCopyInto(&out.Resources) - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Selection.DeepCopyInto(&out.Selection) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. -func (in *Node) DeepCopy() *Node { - if in == nil { - return nil - } - out := new(Node) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in NodesByName) DeepCopyInto(out *NodesByName) { - { - in := &in - *out = make(NodesByName, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodesByName. -func (in NodesByName) DeepCopy() NodesByName { - if in == nil { - return nil - } - out := new(NodesByName) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectRealmSpec) DeepCopyInto(out *ObjectRealmSpec) { - *out = *in - out.Pull = in.Pull - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectRealmSpec. -func (in *ObjectRealmSpec) DeepCopy() *ObjectRealmSpec { - if in == nil { - return nil - } - out := new(ObjectRealmSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectStoreSpec) DeepCopyInto(out *ObjectStoreSpec) { - *out = *in - in.MetadataPool.DeepCopyInto(&out.MetadataPool) - in.DataPool.DeepCopyInto(&out.DataPool) - in.Gateway.DeepCopyInto(&out.Gateway) - out.Zone = in.Zone - in.HealthCheck.DeepCopyInto(&out.HealthCheck) - if in.Security != nil { - in, out := &in.Security, &out.Security - *out = new(SecuritySpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreSpec. -func (in *ObjectStoreSpec) DeepCopy() *ObjectStoreSpec { - if in == nil { - return nil - } - out := new(ObjectStoreSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectStoreStatus) DeepCopyInto(out *ObjectStoreStatus) { - *out = *in - if in.BucketStatus != nil { - in, out := &in.BucketStatus, &out.BucketStatus - *out = new(BucketStatus) - **out = **in - } - if in.Info != nil { - in, out := &in.Info, &out.Info - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreStatus. -func (in *ObjectStoreStatus) DeepCopy() *ObjectStoreStatus { - if in == nil { - return nil - } - out := new(ObjectStoreStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ObjectStoreUserSpec) DeepCopyInto(out *ObjectStoreUserSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreUserSpec. -func (in *ObjectStoreUserSpec) DeepCopy() *ObjectStoreUserSpec { - if in == nil { - return nil - } - out := new(ObjectStoreUserSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectStoreUserStatus) DeepCopyInto(out *ObjectStoreUserStatus) { - *out = *in - if in.Info != nil { - in, out := &in.Info, &out.Info - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreUserStatus. -func (in *ObjectStoreUserStatus) DeepCopy() *ObjectStoreUserStatus { - if in == nil { - return nil - } - out := new(ObjectStoreUserStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectZoneGroupSpec) DeepCopyInto(out *ObjectZoneGroupSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectZoneGroupSpec. -func (in *ObjectZoneGroupSpec) DeepCopy() *ObjectZoneGroupSpec { - if in == nil { - return nil - } - out := new(ObjectZoneGroupSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectZoneSpec) DeepCopyInto(out *ObjectZoneSpec) { - *out = *in - in.MetadataPool.DeepCopyInto(&out.MetadataPool) - in.DataPool.DeepCopyInto(&out.DataPool) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectZoneSpec. -func (in *ObjectZoneSpec) DeepCopy() *ObjectZoneSpec { - if in == nil { - return nil - } - out := new(ObjectZoneSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PeerRemoteSpec) DeepCopyInto(out *PeerRemoteSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerRemoteSpec. -func (in *PeerRemoteSpec) DeepCopy() *PeerRemoteSpec { - if in == nil { - return nil - } - out := new(PeerRemoteSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PeerStatSpec) DeepCopyInto(out *PeerStatSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerStatSpec. -func (in *PeerStatSpec) DeepCopy() *PeerStatSpec { - if in == nil { - return nil - } - out := new(PeerStatSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PeersSpec) DeepCopyInto(out *PeersSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeersSpec. 
-func (in *PeersSpec) DeepCopy() *PeersSpec { - if in == nil { - return nil - } - out := new(PeersSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Placement) DeepCopyInto(out *Placement) { - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(corev1.NodeAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(corev1.PodAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(corev1.PodAntiAffinity) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TopologySpreadConstraints != nil { - in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]corev1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. -func (in *Placement) DeepCopy() *Placement { - if in == nil { - return nil - } - out := new(Placement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in PlacementSpec) DeepCopyInto(out *PlacementSpec) { - { - in := &in - *out = make(PlacementSpec, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementSpec. -func (in PlacementSpec) DeepCopy() PlacementSpec { - if in == nil { - return nil - } - out := new(PlacementSpec) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PoolMirroringInfo) DeepCopyInto(out *PoolMirroringInfo) { - *out = *in - if in.Peers != nil { - in, out := &in.Peers, &out.Peers - *out = make([]PeersSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolMirroringInfo. -func (in *PoolMirroringInfo) DeepCopy() *PoolMirroringInfo { - if in == nil { - return nil - } - out := new(PoolMirroringInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PoolMirroringStatus) DeepCopyInto(out *PoolMirroringStatus) { - *out = *in - if in.Summary != nil { - in, out := &in.Summary, &out.Summary - *out = new(PoolMirroringStatusSummarySpec) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolMirroringStatus. -func (in *PoolMirroringStatus) DeepCopy() *PoolMirroringStatus { - if in == nil { - return nil - } - out := new(PoolMirroringStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PoolMirroringStatusSummarySpec) DeepCopyInto(out *PoolMirroringStatusSummarySpec) { - *out = *in - out.States = in.States - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolMirroringStatusSummarySpec. -func (in *PoolMirroringStatusSummarySpec) DeepCopy() *PoolMirroringStatusSummarySpec { - if in == nil { - return nil - } - out := new(PoolMirroringStatusSummarySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PoolSpec) DeepCopyInto(out *PoolSpec) { - *out = *in - in.Replicated.DeepCopyInto(&out.Replicated) - out.ErasureCoded = in.ErasureCoded - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Mirroring.DeepCopyInto(&out.Mirroring) - in.StatusCheck.DeepCopyInto(&out.StatusCheck) - in.Quotas.DeepCopyInto(&out.Quotas) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolSpec. -func (in *PoolSpec) DeepCopy() *PoolSpec { - if in == nil { - return nil - } - out := new(PoolSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in PriorityClassNamesSpec) DeepCopyInto(out *PriorityClassNamesSpec) { - { - in := &in - *out = make(PriorityClassNamesSpec, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassNamesSpec. -func (in PriorityClassNamesSpec) DeepCopy() PriorityClassNamesSpec { - if in == nil { - return nil - } - out := new(PriorityClassNamesSpec) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { - *out = *in - if in.Probe != nil { - in, out := &in.Probe, &out.Probe - *out = new(corev1.Probe) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. -func (in *ProbeSpec) DeepCopy() *ProbeSpec { - if in == nil { - return nil - } - out := new(ProbeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PullSpec) DeepCopyInto(out *PullSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullSpec. -func (in *PullSpec) DeepCopy() *PullSpec { - if in == nil { - return nil - } - out := new(PullSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QuotaSpec) DeepCopyInto(out *QuotaSpec) { - *out = *in - if in.MaxBytes != nil { - in, out := &in.MaxBytes, &out.MaxBytes - *out = new(uint64) - **out = **in - } - if in.MaxSize != nil { - in, out := &in.MaxSize, &out.MaxSize - *out = new(string) - **out = **in - } - if in.MaxObjects != nil { - in, out := &in.MaxObjects, &out.MaxObjects - *out = new(uint64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSpec. 
-func (in *QuotaSpec) DeepCopy() *QuotaSpec { - if in == nil { - return nil - } - out := new(QuotaSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RBDMirroringSpec) DeepCopyInto(out *RBDMirroringSpec) { - *out = *in - in.Peers.DeepCopyInto(&out.Peers) - in.Placement.DeepCopyInto(&out.Placement) - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(rookio.Labels, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Resources.DeepCopyInto(&out.Resources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDMirroringSpec. -func (in *RBDMirroringSpec) DeepCopy() *RBDMirroringSpec { - if in == nil { - return nil - } - out := new(RBDMirroringSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RGWServiceSpec) DeepCopyInto(out *RGWServiceSpec) { - *out = *in - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RGWServiceSpec. -func (in *RGWServiceSpec) DeepCopy() *RGWServiceSpec { - if in == nil { - return nil - } - out := new(RGWServiceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicatedSpec) DeepCopyInto(out *ReplicatedSpec) { - *out = *in - if in.HybridStorage != nil { - in, out := &in.HybridStorage, &out.HybridStorage - *out = new(HybridStorageSpec) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedSpec. -func (in *ReplicatedSpec) DeepCopy() *ReplicatedSpec { - if in == nil { - return nil - } - out := new(ReplicatedSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ResourceSpec) DeepCopyInto(out *ResourceSpec) { - { - in := &in - *out = make(ResourceSpec, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. -func (in ResourceSpec) DeepCopy() ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SanitizeDisksSpec) DeepCopyInto(out *SanitizeDisksSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SanitizeDisksSpec. -func (in *SanitizeDisksSpec) DeepCopy() *SanitizeDisksSpec { - if in == nil { - return nil - } - out := new(SanitizeDisksSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecuritySpec) DeepCopyInto(out *SecuritySpec) { - *out = *in - in.KeyManagementService.DeepCopyInto(&out.KeyManagementService) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySpec. -func (in *SecuritySpec) DeepCopy() *SecuritySpec { - if in == nil { - return nil - } - out := new(SecuritySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Selection) DeepCopyInto(out *Selection) { - *out = *in - if in.UseAllDevices != nil { - in, out := &in.UseAllDevices, &out.UseAllDevices - *out = new(bool) - **out = **in - } - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]Device, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]corev1.PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selection. -func (in *Selection) DeepCopy() *Selection { - if in == nil { - return nil - } - out := new(Selection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SnapshotSchedule) DeepCopyInto(out *SnapshotSchedule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSchedule. -func (in *SnapshotSchedule) DeepCopy() *SnapshotSchedule { - if in == nil { - return nil - } - out := new(SnapshotSchedule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SnapshotScheduleRetentionSpec) DeepCopyInto(out *SnapshotScheduleRetentionSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleRetentionSpec. -func (in *SnapshotScheduleRetentionSpec) DeepCopy() *SnapshotScheduleRetentionSpec { - if in == nil { - return nil - } - out := new(SnapshotScheduleRetentionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SnapshotScheduleSpec) DeepCopyInto(out *SnapshotScheduleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleSpec. -func (in *SnapshotScheduleSpec) DeepCopy() *SnapshotScheduleSpec { - if in == nil { - return nil - } - out := new(SnapshotScheduleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SnapshotScheduleStatusSpec) DeepCopyInto(out *SnapshotScheduleStatusSpec) { - *out = *in - if in.SnapshotSchedules != nil { - in, out := &in.SnapshotSchedules, &out.SnapshotSchedules - *out = make([]SnapshotSchedulesSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleStatusSpec. 
-func (in *SnapshotScheduleStatusSpec) DeepCopy() *SnapshotScheduleStatusSpec { - if in == nil { - return nil - } - out := new(SnapshotScheduleStatusSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SnapshotSchedulesSpec) DeepCopyInto(out *SnapshotSchedulesSpec) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]SnapshotSchedule, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSchedulesSpec. -func (in *SnapshotSchedulesSpec) DeepCopy() *SnapshotSchedulesSpec { - if in == nil { - return nil - } - out := new(SnapshotSchedulesSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatesSpec) DeepCopyInto(out *StatesSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatesSpec. -func (in *StatesSpec) DeepCopy() *StatesSpec { - if in == nil { - return nil - } - out := new(StatesSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Status) DeepCopyInto(out *Status) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. -func (in *Status) DeepCopy() *Status { - if in == nil { - return nil - } - out := new(Status) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageClassDeviceSet) DeepCopyInto(out *StorageClassDeviceSet) { - *out = *in - in.Resources.DeepCopyInto(&out.Resources) - in.Placement.DeepCopyInto(&out.Placement) - if in.PreparePlacement != nil { - in, out := &in.PreparePlacement, &out.PreparePlacement - *out = new(Placement) - (*in).DeepCopyInto(*out) - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]corev1.PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassDeviceSet. -func (in *StorageClassDeviceSet) DeepCopy() *StorageClassDeviceSet { - if in == nil { - return nil - } - out := new(StorageClassDeviceSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StorageScopeSpec) DeepCopyInto(out *StorageScopeSpec) { - *out = *in - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make([]Node, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Selection.DeepCopyInto(&out.Selection) - if in.StorageClassDeviceSets != nil { - in, out := &in.StorageClassDeviceSets, &out.StorageClassDeviceSets - *out = make([]StorageClassDeviceSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageScopeSpec. -func (in *StorageScopeSpec) DeepCopy() *StorageScopeSpec { - if in == nil { - return nil - } - out := new(StorageScopeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StretchClusterSpec) DeepCopyInto(out *StretchClusterSpec) { - *out = *in - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]StretchClusterZoneSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StretchClusterSpec. -func (in *StretchClusterSpec) DeepCopy() *StretchClusterSpec { - if in == nil { - return nil - } - out := new(StretchClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StretchClusterZoneSpec) DeepCopyInto(out *StretchClusterZoneSpec) { - *out = *in - if in.VolumeClaimTemplate != nil { - in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate - *out = new(corev1.PersistentVolumeClaim) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StretchClusterZoneSpec. -func (in *StretchClusterZoneSpec) DeepCopy() *StretchClusterZoneSpec { - if in == nil { - return nil - } - out := new(StretchClusterZoneSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ZoneSpec) DeepCopyInto(out *ZoneSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneSpec. -func (in *ZoneSpec) DeepCopy() *ZoneSpec { - if in == nil { - return nil - } - out := new(ZoneSpec) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/apis/nfs.rook.io/register.go b/pkg/apis/nfs.rook.io/register.go deleted file mode 100644 index 0a7b43d6d..000000000 --- a/pkg/apis/nfs.rook.io/register.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
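The autogenerated DeepCopyInto/DeepCopy pairs above all follow one pattern: copy the value fields with a plain assignment, then re-allocate any maps, slices, or pointers so the copy shares no storage with the original. A minimal sketch of that pattern and of how callers rely on it is below; ExampleSpec is a hypothetical stand-in for illustration, not one of the ceph.rook.io types in this file.

// Illustrative sketch only; ExampleSpec is hypothetical and mimics the shape
// of the generated API types: a value field plus a reference-typed field
// that a plain assignment would otherwise alias.
package main

import "fmt"

type ExampleSpec struct {
	Count  int
	Labels map[string]string
}

// DeepCopyInto copies the receiver into out, re-allocating reference types
// so the two specs no longer share storage.
func (in *ExampleSpec) DeepCopyInto(out *ExampleSpec) {
	*out = *in
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
}

// DeepCopy allocates a new ExampleSpec and copies the receiver into it.
func (in *ExampleSpec) DeepCopy() *ExampleSpec {
	if in == nil {
		return nil
	}
	out := new(ExampleSpec)
	in.DeepCopyInto(out)
	return out
}

func main() {
	orig := &ExampleSpec{Count: 3, Labels: map[string]string{"app": "rook"}}
	cp := orig.DeepCopy()
	cp.Labels["app"] = "changed" // does not affect orig
	fmt.Println(orig.Labels["app"], cp.Labels["app"])
}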
-*/ -package nfsrookio - -const ( - CustomResourceGroupName = "nfs.rook.io" -) diff --git a/pkg/apis/nfs.rook.io/v1alpha1/doc.go b/pkg/apis/nfs.rook.io/v1alpha1/doc.go deleted file mode 100644 index c629ac00c..000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register - -// Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=nfs.rook.io -package v1alpha1 diff --git a/pkg/apis/nfs.rook.io/v1alpha1/register.go b/pkg/apis/nfs.rook.io/v1alpha1/register.go deleted file mode 100644 index a44e66114..000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/register.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - nfsrookio "github.com/rook/rook/pkg/apis/nfs.rook.io" -) - -const ( - CustomResourceGroup = "nfs.rook.io" - Version = "v1alpha1" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: nfsrookio.CustomResourceGroupName, Version: Version} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &NFSServer{}, - &NFSServerList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/pkg/apis/nfs.rook.io/v1alpha1/types.go b/pkg/apis/nfs.rook.io/v1alpha1/types.go deleted file mode 100644 index 6aee04443..000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/types.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// *************************************************************************** -// IMPORTANT FOR CODE GENERATION -// If the types in this file are updated, you will need to run -// `make codegen` to generate the new types under the client/clientset folder. -// *************************************************************************** - -const ( - Finalizer = "nfsserver.nfs.rook.io" -) - -const ( - EventCreated = "Created" - EventUpdated = "Updated" - EventFailed = "Failed" -) - -type NFSServerState string - -const ( - StateInitializing NFSServerState = "Initializing" - StatePending NFSServerState = "Pending" - StateRunning NFSServerState = "Running" - StateError NFSServerState = "Error" -) - -// NFSServerStatus defines the observed state of NFSServer -type NFSServerStatus struct { - State NFSServerState `json:"state,omitempty"` - Message string `json:"message,omitempty"` - Reason string `json:"reason,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="NFS Server instance state" - -// NFSServer is the Schema for the nfsservers API -type NFSServer struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NFSServerSpec `json:"spec,omitempty"` - Status NFSServerStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// NFSServerList contains a list of NFSServer -type NFSServerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []NFSServer `json:"items"` -} - -// NFSServerSpec represents the spec of NFS daemon -type NFSServerSpec struct { - // The annotations-related configuration to add/set on each Pod related object. 
- Annotations map[string]string `json:"annotations,omitempty"` - - // Replicas of the NFS daemon - Replicas int `json:"replicas,omitempty"` - - // The parameters to configure the NFS export - Exports []ExportsSpec `json:"exports,omitempty"` -} - -// ExportsSpec represents the spec of NFS exports -type ExportsSpec struct { - // Name of the export - Name string `json:"name,omitempty"` - - // The NFS server configuration - Server ServerSpec `json:"server,omitempty"` - - // PVC from which the NFS daemon gets storage for sharing - PersistentVolumeClaim v1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` -} - -// ServerSpec represents the spec for configuring the NFS server -type ServerSpec struct { - // Reading and Writing permissions on the export - // Valid values are "ReadOnly", "ReadWrite" and "none" - // +kubebuilder:validation:Enum=ReadOnly;ReadWrite;none - AccessMode string `json:"accessMode,omitempty"` - - // This prevents the root users connected remotely from having root privileges - // Valid values are "none", "rootid", "root", and "all" - // +kubebuilder:validation:Enum=none;rootid;root;all - Squash string `json:"squash,omitempty"` - - // The clients allowed to access the NFS export - // +optional - AllowedClients []AllowedClientsSpec `json:"allowedClients,omitempty"` -} - -// AllowedClientsSpec represents the client specs for accessing the NFS export -type AllowedClientsSpec struct { - - // Name of the clients group - Name string `json:"name,omitempty"` - - // The clients that can access the share - // Values can be hostname, ip address, netgroup, CIDR network address, or all - Clients []string `json:"clients,omitempty"` - - // Reading and Writing permissions for the client to access the NFS export - // Valid values are "ReadOnly", "ReadWrite" and "none" - // Gets overridden when ServerSpec.accessMode is specified - // +kubebuilder:validation:Enum=ReadOnly;ReadWrite;none - AccessMode string `json:"accessMode,omitempty"` - - // Squash options for clients - // Valid values are "none", "rootid", "root", and "all" - // Gets overridden when ServerSpec.squash is specified - // +kubebuilder:validation:Enum=none;rootid;root;all - Squash string `json:"squash,omitempty"` -} diff --git a/pkg/apis/nfs.rook.io/v1alpha1/webhook.go b/pkg/apis/nfs.rook.io/v1alpha1/webhook.go deleted file mode 100644 index a4943cde8..000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/webhook.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "strings" - - "github.com/coreos/pkg/capnslog" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/kustomize/kyaml/sets" -) - -var ( - webhookName = "nfs-webhook" - logger = capnslog.NewPackageLogger("github.com/rook/rook", webhookName) -) - -// compile-time assertions ensures NFSServer implements webhook.Defaulter so a webhook builder -// will be registered for the mutating webhook. -var _ webhook.Defaulter = &NFSServer{} - -// Default implements webhook.Defaulter contains mutating webhook admission logic. -func (r *NFSServer) Default() { - logger.Info("default", "name", r.Name) - logger.Warning("defaulting is not supported yet") -} - -// compile-time assertions ensures NFSServer implements webhook.Validator so a webhook builder -// will be registered for the validating webhook. -var _ webhook.Validator = &NFSServer{} - -// ValidateCreate implements webhook.Validator contains validating webhook admission logic for CREATE operation -func (r *NFSServer) ValidateCreate() error { - logger.Info("validate create", "name", r.Name) - - if err := r.ValidateSpec(); err != nil { - return err - } - - return nil -} - -// ValidateUpdate implements webhook.Validator contains validating webhook admission logic for UPDATE operation -func (r *NFSServer) ValidateUpdate(old runtime.Object) error { - logger.Info("validate update", "name", r.Name) - - if err := r.ValidateSpec(); err != nil { - return err - } - - return nil -} - -// ValidateDelete implements webhook.Validator contains validating webhook admission logic for DELETE operation -func (r *NFSServer) ValidateDelete() error { - logger.Info("validate delete", "name", r.Name) - logger.Warning("validating delete event is not supported") - - return nil -} - -// ValidateSpec validate NFSServer spec. -func (r *NFSServer) ValidateSpec() error { - var allErrs field.ErrorList - - spec := r.Spec - specPath := field.NewPath("spec") - allErrs = append(allErrs, spec.validateExports(specPath)...) - - return allErrs.ToAggregate() -} - -func (r *NFSServerSpec) validateExports(parentPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - exportsPath := parentPath.Child("exports") - allNames := sets.String{} - allPVCNames := sets.String{} - for i, export := range r.Exports { - idxPath := exportsPath.Index(i) - namePath := idxPath.Child("name") - errList := field.ErrorList{} - if allNames.Has(export.Name) { - errList = append(errList, field.Duplicate(namePath, export.Name)) - } - - pvcNamePath := idxPath.Child("persistentVolumeClaim", "claimName") - if allPVCNames.Has(export.PersistentVolumeClaim.ClaimName) { - errList = append(errList, field.Duplicate(pvcNamePath, export.PersistentVolumeClaim.ClaimName)) - } - - if len(errList) == 0 { - allNames.Insert(export.Name) - allPVCNames.Insert(export.PersistentVolumeClaim.ClaimName) - } else { - allErrs = append(allErrs, errList...) - } - - allErrs = append(allErrs, export.validateServer(idxPath)...) 
- } - - return allErrs -} - -func (r *ExportsSpec) validateServer(parentPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - server := r.Server - serverPath := parentPath.Child("server") - accessModePath := serverPath.Child("accessMode") - if err := validateAccessMode(accessModePath, server.AccessMode); err != nil { - allErrs = append(allErrs, err) - } - - squashPath := serverPath.Child("squash") - if err := validateSquashMode(squashPath, server.Squash); err != nil { - allErrs = append(allErrs, err) - } - - allErrs = append(allErrs, server.validateAllowedClient(serverPath)...) - - return allErrs -} - -func (r *ServerSpec) validateAllowedClient(parentPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - allowedClientsPath := parentPath.Child("allowedClients") - allNames := sets.String{} - for i, allowedClient := range r.AllowedClients { - idxPath := allowedClientsPath.Index(i) - namePath := idxPath.Child("name") - errList := field.ErrorList{} - if allNames.Has(allowedClient.Name) { - errList = append(errList, field.Duplicate(namePath, allowedClient.Name)) - } - - if len(errList) == 0 { - allNames.Insert(allowedClient.Name) - } else { - allErrs = append(allErrs, errList...) - } - - accessModePath := idxPath.Child("accessMode") - if err := validateAccessMode(accessModePath, allowedClient.AccessMode); err != nil { - allErrs = append(allErrs, err) - } - - squashPath := idxPath.Child("squash") - if err := validateSquashMode(squashPath, allowedClient.Squash); err != nil { - allErrs = append(allErrs, err) - } - } - - return allErrs -} - -func validateAccessMode(path *field.Path, mode string) *field.Error { - switch strings.ToLower(mode) { - case "readonly": - case "readwrite": - case "none": - default: - return field.Invalid(path, mode, "valid values are (ReadOnly, ReadWrite, none)") - } - return nil -} - -func validateSquashMode(path *field.Path, mode string) *field.Error { - switch strings.ToLower(mode) { - case "rootid": - case "root": - case "all": - case "none": - default: - return field.Invalid(path, mode, "valid values are (none, rootId, root, all)") - } - return nil -} diff --git a/pkg/apis/nfs.rook.io/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/nfs.rook.io/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 7294f0cda..000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,194 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
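The webhook logic removed above rejects NFSServer specs that reuse an export name or PVC claim name, or that use accessMode/squash values outside the documented enums (checked case-insensitively). A minimal sketch of a spec that passes those checks, written against the pre-removal nfs.rook.io/v1alpha1 package, follows; the object, namespace, and claim names are made up for illustration.

// Illustrative sketch only; uses the pre-removal import path and invented names.
package main

import (
	"fmt"

	nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" // pre-removal path
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	server := &nfsv1alpha1.NFSServer{
		ObjectMeta: metav1.ObjectMeta{Name: "rook-nfs", Namespace: "rook-nfs"},
		Spec: nfsv1alpha1.NFSServerSpec{
			Replicas: 1,
			Exports: []nfsv1alpha1.ExportsSpec{{
				Name: "share1", // export names must be unique across Exports
				Server: nfsv1alpha1.ServerSpec{
					AccessMode: "ReadWrite", // validated case-insensitively
					Squash:     "none",
				},
				PersistentVolumeClaim: corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: "nfs-default-claim", // claim names must be unique too
				},
			}},
		},
	}

	// ValidateCreate runs the same spec checks the validating webhook applied.
	if err := server.ValidateCreate(); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("spec accepted")
}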
-func (in *AllowedClientsSpec) DeepCopyInto(out *AllowedClientsSpec) { - *out = *in - if in.Clients != nil { - in, out := &in.Clients, &out.Clients - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedClientsSpec. -func (in *AllowedClientsSpec) DeepCopy() *AllowedClientsSpec { - if in == nil { - return nil - } - out := new(AllowedClientsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExportsSpec) DeepCopyInto(out *ExportsSpec) { - *out = *in - in.Server.DeepCopyInto(&out.Server) - out.PersistentVolumeClaim = in.PersistentVolumeClaim - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportsSpec. -func (in *ExportsSpec) DeepCopy() *ExportsSpec { - if in == nil { - return nil - } - out := new(ExportsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSServer) DeepCopyInto(out *NFSServer) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSServer. -func (in *NFSServer) DeepCopy() *NFSServer { - if in == nil { - return nil - } - out := new(NFSServer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NFSServer) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSServerList) DeepCopyInto(out *NFSServerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NFSServer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSServerList. -func (in *NFSServerList) DeepCopy() *NFSServerList { - if in == nil { - return nil - } - out := new(NFSServerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NFSServerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSServerSpec) DeepCopyInto(out *NFSServerSpec) { - *out = *in - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Exports != nil { - in, out := &in.Exports, &out.Exports - *out = make([]ExportsSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSServerSpec. 
-func (in *NFSServerSpec) DeepCopy() *NFSServerSpec { - if in == nil { - return nil - } - out := new(NFSServerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSServerStatus) DeepCopyInto(out *NFSServerStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSServerStatus. -func (in *NFSServerStatus) DeepCopy() *NFSServerStatus { - if in == nil { - return nil - } - out := new(NFSServerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServerSpec) DeepCopyInto(out *ServerSpec) { - *out = *in - if in.AllowedClients != nil { - in, out := &in.AllowedClients, &out.AllowedClients - *out = make([]AllowedClientsSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec. -func (in *ServerSpec) DeepCopy() *ServerSpec { - if in == nil { - return nil - } - out := new(ServerSpec) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index dbc6b4c15..879ba74a3 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -21,10 +21,8 @@ package versioned import ( "fmt" - cassandrav1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" - cephv1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1" - nfsv1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1" - rookv1alpha2 "github.com/rook/rook/pkg/client/clientset/versioned/typed/rook.io/v1alpha2" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" + rookv1alpha2 "github.com/rook/cassandra/pkg/client/clientset/versioned/typed/rook.io/v1alpha2" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -33,8 +31,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface CassandraV1alpha1() cassandrav1alpha1.CassandraV1alpha1Interface - CephV1() cephv1.CephV1Interface - NfsV1alpha1() nfsv1alpha1.NfsV1alpha1Interface RookV1alpha2() rookv1alpha2.RookV1alpha2Interface } @@ -43,8 +39,6 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient cassandraV1alpha1 *cassandrav1alpha1.CassandraV1alpha1Client - cephV1 *cephv1.CephV1Client - nfsV1alpha1 *nfsv1alpha1.NfsV1alpha1Client rookV1alpha2 *rookv1alpha2.RookV1alpha2Client } @@ -53,16 +47,6 @@ func (c *Clientset) CassandraV1alpha1() cassandrav1alpha1.CassandraV1alpha1Inter return c.cassandraV1alpha1 } -// CephV1 retrieves the CephV1Client -func (c *Clientset) CephV1() cephv1.CephV1Interface { - return c.cephV1 -} - -// NfsV1alpha1 retrieves the NfsV1alpha1Client -func (c *Clientset) NfsV1alpha1() nfsv1alpha1.NfsV1alpha1Interface { - return c.nfsV1alpha1 -} - // RookV1alpha2 retrieves the RookV1alpha2Client func (c *Clientset) RookV1alpha2() rookv1alpha2.RookV1alpha2Interface { return c.rookV1alpha2 @@ -93,14 +77,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.cephV1, err = cephv1.NewForConfig(&configShallowCopy) - if 
err != nil { - return nil, err - } - cs.nfsV1alpha1, err = nfsv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.rookV1alpha2, err = rookv1alpha2.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -118,8 +94,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.cassandraV1alpha1 = cassandrav1alpha1.NewForConfigOrDie(c) - cs.cephV1 = cephv1.NewForConfigOrDie(c) - cs.nfsV1alpha1 = nfsv1alpha1.NewForConfigOrDie(c) cs.rookV1alpha2 = rookv1alpha2.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) @@ -130,8 +104,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.cassandraV1alpha1 = cassandrav1alpha1.New(c) - cs.cephV1 = cephv1.New(c) - cs.nfsV1alpha1 = nfsv1alpha1.New(c) cs.rookV1alpha2 = rookv1alpha2.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 9708881f5..8491542c5 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -19,15 +19,11 @@ limitations under the License. package fake import ( - clientset "github.com/rook/rook/pkg/client/clientset/versioned" - cassandrav1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" - fakecassandrav1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake" - cephv1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1" - fakecephv1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake" - nfsv1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1" - fakenfsv1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake" - rookv1alpha2 "github.com/rook/rook/pkg/client/clientset/versioned/typed/rook.io/v1alpha2" - fakerookv1alpha2 "github.com/rook/rook/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake" + clientset "github.com/rook/cassandra/pkg/client/clientset/versioned" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" + fakecassandrav1alpha1 "github.com/rook/cassandra/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake" + rookv1alpha2 "github.com/rook/cassandra/pkg/client/clientset/versioned/typed/rook.io/v1alpha2" + fakerookv1alpha2 "github.com/rook/cassandra/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -87,16 +83,6 @@ func (c *Clientset) CassandraV1alpha1() cassandrav1alpha1.CassandraV1alpha1Inter return &fakecassandrav1alpha1.FakeCassandraV1alpha1{Fake: &c.Fake} } -// CephV1 retrieves the CephV1Client -func (c *Clientset) CephV1() cephv1.CephV1Interface { - return &fakecephv1.FakeCephV1{Fake: &c.Fake} -} - -// NfsV1alpha1 retrieves the NfsV1alpha1Client -func (c *Clientset) NfsV1alpha1() nfsv1alpha1.NfsV1alpha1Interface { - return &fakenfsv1alpha1.FakeNfsV1alpha1{Fake: &c.Fake} -} - // RookV1alpha2 retrieves the RookV1alpha2Client func (c *Clientset) RookV1alpha2() rookv1alpha2.RookV1alpha2Interface { return &fakerookv1alpha2.FakeRookV1alpha2{Fake: &c.Fake} diff --git 
a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index a3a07b0fc..810778234 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -19,10 +19,8 @@ limitations under the License. package fake import ( - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - rookv1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + rookv1alpha2 "github.com/rook/cassandra/pkg/apis/rook.io/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -35,8 +33,6 @@ var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ cassandrav1alpha1.AddToScheme, - cephv1.AddToScheme, - nfsv1alpha1.AddToScheme, rookv1alpha2.AddToScheme, } diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index 1b4a713e4..80086fdee 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -19,10 +19,8 @@ limitations under the License. package scheme import ( - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - rookv1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + rookv1alpha2 "github.com/rook/cassandra/pkg/apis/rook.io/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -35,8 +33,6 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ cassandrav1alpha1.AddToScheme, - cephv1.AddToScheme, - nfsv1alpha1.AddToScheme, rookv1alpha2.AddToScheme, } diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cassandra.rook.io_client.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cassandra.rook.io_client.go index 33a040f16..402fc9283 100644 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cassandra.rook.io_client.go +++ b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cassandra.rook.io_client.go @@ -19,8 +19,8 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + v1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cluster.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cluster.go index a08427fb9..09c7e3e05 100644 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cluster.go +++ b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cluster.go @@ -22,8 +22,8 @@ import ( "context" "time" - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + v1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + scheme "github.com/rook/cassandra/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cassandra.rook.io_client.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cassandra.rook.io_client.go index 39a28ca2d..9d74e1a6c 100644 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cassandra.rook.io_client.go +++ b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cassandra.rook.io_client.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - v1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" + v1alpha1 "github.com/rook/cassandra/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cluster.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cluster.go index 6b2493c56..19425dc43 100644 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cluster.go +++ b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cluster.go @@ -21,7 +21,7 @@ package fake import ( "context" - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" + v1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go deleted file mode 100644 index 6bf780016..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type CephV1Interface interface { - RESTClient() rest.Interface - CephBlockPoolsGetter - CephClientsGetter - CephClustersGetter - CephFilesystemsGetter - CephFilesystemMirrorsGetter - CephNFSesGetter - CephObjectRealmsGetter - CephObjectStoresGetter - CephObjectStoreUsersGetter - CephObjectZonesGetter - CephObjectZoneGroupsGetter - CephRBDMirrorsGetter -} - -// CephV1Client is used to interact with features provided by the ceph.rook.io group. -type CephV1Client struct { - restClient rest.Interface -} - -func (c *CephV1Client) CephBlockPools(namespace string) CephBlockPoolInterface { - return newCephBlockPools(c, namespace) -} - -func (c *CephV1Client) CephClients(namespace string) CephClientInterface { - return newCephClients(c, namespace) -} - -func (c *CephV1Client) CephClusters(namespace string) CephClusterInterface { - return newCephClusters(c, namespace) -} - -func (c *CephV1Client) CephFilesystems(namespace string) CephFilesystemInterface { - return newCephFilesystems(c, namespace) -} - -func (c *CephV1Client) CephFilesystemMirrors(namespace string) CephFilesystemMirrorInterface { - return newCephFilesystemMirrors(c, namespace) -} - -func (c *CephV1Client) CephNFSes(namespace string) CephNFSInterface { - return newCephNFSes(c, namespace) -} - -func (c *CephV1Client) CephObjectRealms(namespace string) CephObjectRealmInterface { - return newCephObjectRealms(c, namespace) -} - -func (c *CephV1Client) CephObjectStores(namespace string) CephObjectStoreInterface { - return newCephObjectStores(c, namespace) -} - -func (c *CephV1Client) CephObjectStoreUsers(namespace string) CephObjectStoreUserInterface { - return newCephObjectStoreUsers(c, namespace) -} - -func (c *CephV1Client) CephObjectZones(namespace string) CephObjectZoneInterface { - return newCephObjectZones(c, namespace) -} - -func (c *CephV1Client) CephObjectZoneGroups(namespace string) CephObjectZoneGroupInterface { - return newCephObjectZoneGroups(c, namespace) -} - -func (c *CephV1Client) CephRBDMirrors(namespace string) CephRBDMirrorInterface { - return newCephRBDMirrors(c, namespace) -} - -// NewForConfig creates a new CephV1Client for the given config. -func NewForConfig(c *rest.Config) (*CephV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CephV1Client{client}, nil -} - -// NewForConfigOrDie creates a new CephV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CephV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CephV1Client for the given RESTClient. 
-func New(c rest.Interface) *CephV1Client { - return &CephV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CephV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpool.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpool.go deleted file mode 100644 index b222f65fd..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpool.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephBlockPoolsGetter has a method to return a CephBlockPoolInterface. -// A group's client should implement this interface. -type CephBlockPoolsGetter interface { - CephBlockPools(namespace string) CephBlockPoolInterface -} - -// CephBlockPoolInterface has methods to work with CephBlockPool resources. -type CephBlockPoolInterface interface { - Create(ctx context.Context, cephBlockPool *v1.CephBlockPool, opts metav1.CreateOptions) (*v1.CephBlockPool, error) - Update(ctx context.Context, cephBlockPool *v1.CephBlockPool, opts metav1.UpdateOptions) (*v1.CephBlockPool, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephBlockPool, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephBlockPoolList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBlockPool, err error) - CephBlockPoolExpansion -} - -// cephBlockPools implements CephBlockPoolInterface -type cephBlockPools struct { - client rest.Interface - ns string -} - -// newCephBlockPools returns a CephBlockPools -func newCephBlockPools(c *CephV1Client, namespace string) *cephBlockPools { - return &cephBlockPools{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephBlockPool, and returns the corresponding cephBlockPool object, and an error if there is any. 
-func (c *cephBlockPools) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephBlockPool, err error) { - result = &v1.CephBlockPool{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephblockpools"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephBlockPools that match those selectors. -func (c *cephBlockPools) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephBlockPoolList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephBlockPoolList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephblockpools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephBlockPools. -func (c *cephBlockPools) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephblockpools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephBlockPool and creates it. Returns the server's representation of the cephBlockPool, and an error, if there is any. -func (c *cephBlockPools) Create(ctx context.Context, cephBlockPool *v1.CephBlockPool, opts metav1.CreateOptions) (result *v1.CephBlockPool, err error) { - result = &v1.CephBlockPool{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephblockpools"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephBlockPool). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephBlockPool and updates it. Returns the server's representation of the cephBlockPool, and an error, if there is any. -func (c *cephBlockPools) Update(ctx context.Context, cephBlockPool *v1.CephBlockPool, opts metav1.UpdateOptions) (result *v1.CephBlockPool, err error) { - result = &v1.CephBlockPool{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephblockpools"). - Name(cephBlockPool.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephBlockPool). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephBlockPool and deletes it. Returns an error if one occurs. -func (c *cephBlockPools) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephblockpools"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephBlockPools) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephblockpools"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephBlockPool. 
-func (c *cephBlockPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBlockPool, err error) { - result = &v1.CephBlockPool{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephblockpools"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephclient.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephclient.go deleted file mode 100644 index db45d7ef0..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephclient.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephClientsGetter has a method to return a CephClientInterface. -// A group's client should implement this interface. -type CephClientsGetter interface { - CephClients(namespace string) CephClientInterface -} - -// CephClientInterface has methods to work with CephClient resources. -type CephClientInterface interface { - Create(ctx context.Context, cephClient *v1.CephClient, opts metav1.CreateOptions) (*v1.CephClient, error) - Update(ctx context.Context, cephClient *v1.CephClient, opts metav1.UpdateOptions) (*v1.CephClient, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephClient, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephClientList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephClient, err error) - CephClientExpansion -} - -// cephClients implements CephClientInterface -type cephClients struct { - client rest.Interface - ns string -} - -// newCephClients returns a CephClients -func newCephClients(c *CephV1Client, namespace string) *cephClients { - return &cephClients{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephClient, and returns the corresponding cephClient object, and an error if there is any. -func (c *cephClients) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephClient, err error) { - result = &v1.CephClient{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephclients"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephClients that match those selectors. -func (c *cephClients) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephClientList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephClientList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephclients"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephClients. -func (c *cephClients) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephclients"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephClient and creates it. Returns the server's representation of the cephClient, and an error, if there is any. -func (c *cephClients) Create(ctx context.Context, cephClient *v1.CephClient, opts metav1.CreateOptions) (result *v1.CephClient, err error) { - result = &v1.CephClient{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephclients"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephClient). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephClient and updates it. Returns the server's representation of the cephClient, and an error, if there is any. -func (c *cephClients) Update(ctx context.Context, cephClient *v1.CephClient, opts metav1.UpdateOptions) (result *v1.CephClient, err error) { - result = &v1.CephClient{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephclients"). - Name(cephClient.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephClient). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephClient and deletes it. Returns an error if one occurs. -func (c *cephClients) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephclients"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephClients) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephclients"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephClient. -func (c *cephClients) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephClient, err error) { - result = &v1.CephClient{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephclients"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcluster.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcluster.go deleted file mode 100644 index 7ebe4e2c9..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcluster.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephClustersGetter has a method to return a CephClusterInterface. -// A group's client should implement this interface. -type CephClustersGetter interface { - CephClusters(namespace string) CephClusterInterface -} - -// CephClusterInterface has methods to work with CephCluster resources. -type CephClusterInterface interface { - Create(ctx context.Context, cephCluster *v1.CephCluster, opts metav1.CreateOptions) (*v1.CephCluster, error) - Update(ctx context.Context, cephCluster *v1.CephCluster, opts metav1.UpdateOptions) (*v1.CephCluster, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephCluster, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephClusterList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephCluster, err error) - CephClusterExpansion -} - -// cephClusters implements CephClusterInterface -type cephClusters struct { - client rest.Interface - ns string -} - -// newCephClusters returns a CephClusters -func newCephClusters(c *CephV1Client, namespace string) *cephClusters { - return &cephClusters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephCluster, and returns the corresponding cephCluster object, and an error if there is any. -func (c *cephClusters) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephCluster, err error) { - result = &v1.CephCluster{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephclusters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephClusters that match those selectors. 
-func (c *cephClusters) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephClusterList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephClusterList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephclusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephClusters. -func (c *cephClusters) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephclusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephCluster and creates it. Returns the server's representation of the cephCluster, and an error, if there is any. -func (c *cephClusters) Create(ctx context.Context, cephCluster *v1.CephCluster, opts metav1.CreateOptions) (result *v1.CephCluster, err error) { - result = &v1.CephCluster{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephclusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephCluster). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephCluster and updates it. Returns the server's representation of the cephCluster, and an error, if there is any. -func (c *cephClusters) Update(ctx context.Context, cephCluster *v1.CephCluster, opts metav1.UpdateOptions) (result *v1.CephCluster, err error) { - result = &v1.CephCluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephclusters"). - Name(cephCluster.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephCluster). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephCluster and deletes it. Returns an error if one occurs. -func (c *cephClusters) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephclusters"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephClusters) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephclusters"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephCluster. -func (c *cephClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephCluster, err error) { - result = &v1.CephCluster{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephclusters"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystem.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystem.go deleted file mode 100644 index 1dccce1ef..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystem.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephFilesystemsGetter has a method to return a CephFilesystemInterface. -// A group's client should implement this interface. -type CephFilesystemsGetter interface { - CephFilesystems(namespace string) CephFilesystemInterface -} - -// CephFilesystemInterface has methods to work with CephFilesystem resources. -type CephFilesystemInterface interface { - Create(ctx context.Context, cephFilesystem *v1.CephFilesystem, opts metav1.CreateOptions) (*v1.CephFilesystem, error) - Update(ctx context.Context, cephFilesystem *v1.CephFilesystem, opts metav1.UpdateOptions) (*v1.CephFilesystem, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephFilesystem, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephFilesystemList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystem, err error) - CephFilesystemExpansion -} - -// cephFilesystems implements CephFilesystemInterface -type cephFilesystems struct { - client rest.Interface - ns string -} - -// newCephFilesystems returns a CephFilesystems -func newCephFilesystems(c *CephV1Client, namespace string) *cephFilesystems { - return &cephFilesystems{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephFilesystem, and returns the corresponding cephFilesystem object, and an error if there is any. -func (c *cephFilesystems) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephFilesystem, err error) { - result = &v1.CephFilesystem{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephfilesystems"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephFilesystems that match those selectors. 
-func (c *cephFilesystems) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephFilesystemList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephFilesystemList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephfilesystems"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephFilesystems. -func (c *cephFilesystems) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephfilesystems"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephFilesystem and creates it. Returns the server's representation of the cephFilesystem, and an error, if there is any. -func (c *cephFilesystems) Create(ctx context.Context, cephFilesystem *v1.CephFilesystem, opts metav1.CreateOptions) (result *v1.CephFilesystem, err error) { - result = &v1.CephFilesystem{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephfilesystems"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephFilesystem). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephFilesystem and updates it. Returns the server's representation of the cephFilesystem, and an error, if there is any. -func (c *cephFilesystems) Update(ctx context.Context, cephFilesystem *v1.CephFilesystem, opts metav1.UpdateOptions) (result *v1.CephFilesystem, err error) { - result = &v1.CephFilesystem{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephfilesystems"). - Name(cephFilesystem.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephFilesystem). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephFilesystem and deletes it. Returns an error if one occurs. -func (c *cephFilesystems) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephfilesystems"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephFilesystems) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephfilesystems"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephFilesystem. -func (c *cephFilesystems) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystem, err error) { - result = &v1.CephFilesystem{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephfilesystems"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemmirror.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemmirror.go deleted file mode 100644 index 867b42f09..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemmirror.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephFilesystemMirrorsGetter has a method to return a CephFilesystemMirrorInterface. -// A group's client should implement this interface. -type CephFilesystemMirrorsGetter interface { - CephFilesystemMirrors(namespace string) CephFilesystemMirrorInterface -} - -// CephFilesystemMirrorInterface has methods to work with CephFilesystemMirror resources. -type CephFilesystemMirrorInterface interface { - Create(ctx context.Context, cephFilesystemMirror *v1.CephFilesystemMirror, opts metav1.CreateOptions) (*v1.CephFilesystemMirror, error) - Update(ctx context.Context, cephFilesystemMirror *v1.CephFilesystemMirror, opts metav1.UpdateOptions) (*v1.CephFilesystemMirror, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephFilesystemMirror, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephFilesystemMirrorList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystemMirror, err error) - CephFilesystemMirrorExpansion -} - -// cephFilesystemMirrors implements CephFilesystemMirrorInterface -type cephFilesystemMirrors struct { - client rest.Interface - ns string -} - -// newCephFilesystemMirrors returns a CephFilesystemMirrors -func newCephFilesystemMirrors(c *CephV1Client, namespace string) *cephFilesystemMirrors { - return &cephFilesystemMirrors{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephFilesystemMirror, and returns the corresponding cephFilesystemMirror object, and an error if there is any. -func (c *cephFilesystemMirrors) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephFilesystemMirror, err error) { - result = &v1.CephFilesystemMirror{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephfilesystemmirrors"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephFilesystemMirrors that match those selectors. -func (c *cephFilesystemMirrors) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephFilesystemMirrorList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephFilesystemMirrorList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephfilesystemmirrors"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephFilesystemMirrors. -func (c *cephFilesystemMirrors) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephfilesystemmirrors"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephFilesystemMirror and creates it. Returns the server's representation of the cephFilesystemMirror, and an error, if there is any. -func (c *cephFilesystemMirrors) Create(ctx context.Context, cephFilesystemMirror *v1.CephFilesystemMirror, opts metav1.CreateOptions) (result *v1.CephFilesystemMirror, err error) { - result = &v1.CephFilesystemMirror{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephfilesystemmirrors"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephFilesystemMirror). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephFilesystemMirror and updates it. Returns the server's representation of the cephFilesystemMirror, and an error, if there is any. -func (c *cephFilesystemMirrors) Update(ctx context.Context, cephFilesystemMirror *v1.CephFilesystemMirror, opts metav1.UpdateOptions) (result *v1.CephFilesystemMirror, err error) { - result = &v1.CephFilesystemMirror{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephfilesystemmirrors"). - Name(cephFilesystemMirror.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephFilesystemMirror). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephFilesystemMirror and deletes it. Returns an error if one occurs. -func (c *cephFilesystemMirrors) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephfilesystemmirrors"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephFilesystemMirrors) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephfilesystemmirrors"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephFilesystemMirror. 
-func (c *cephFilesystemMirrors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystemMirror, err error) { - result = &v1.CephFilesystemMirror{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephfilesystemmirrors"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephnfs.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephnfs.go deleted file mode 100644 index bc4351654..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephnfs.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephNFSesGetter has a method to return a CephNFSInterface. -// A group's client should implement this interface. -type CephNFSesGetter interface { - CephNFSes(namespace string) CephNFSInterface -} - -// CephNFSInterface has methods to work with CephNFS resources. -type CephNFSInterface interface { - Create(ctx context.Context, cephNFS *v1.CephNFS, opts metav1.CreateOptions) (*v1.CephNFS, error) - Update(ctx context.Context, cephNFS *v1.CephNFS, opts metav1.UpdateOptions) (*v1.CephNFS, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephNFS, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephNFSList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephNFS, err error) - CephNFSExpansion -} - -// cephNFSes implements CephNFSInterface -type cephNFSes struct { - client rest.Interface - ns string -} - -// newCephNFSes returns a CephNFSes -func newCephNFSes(c *CephV1Client, namespace string) *cephNFSes { - return &cephNFSes{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephNFS, and returns the corresponding cephNFS object, and an error if there is any. -func (c *cephNFSes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephNFS, err error) { - result = &v1.CephNFS{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephnfses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephNFSes that match those selectors. -func (c *cephNFSes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephNFSList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephNFSList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephnfses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephNFSes. -func (c *cephNFSes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephnfses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephNFS and creates it. Returns the server's representation of the cephNFS, and an error, if there is any. -func (c *cephNFSes) Create(ctx context.Context, cephNFS *v1.CephNFS, opts metav1.CreateOptions) (result *v1.CephNFS, err error) { - result = &v1.CephNFS{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephnfses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephNFS). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephNFS and updates it. Returns the server's representation of the cephNFS, and an error, if there is any. -func (c *cephNFSes) Update(ctx context.Context, cephNFS *v1.CephNFS, opts metav1.UpdateOptions) (result *v1.CephNFS, err error) { - result = &v1.CephNFS{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephnfses"). - Name(cephNFS.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephNFS). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephNFS and deletes it. Returns an error if one occurs. -func (c *cephNFSes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephnfses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephNFSes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephnfses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephNFS. -func (c *cephNFSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephNFS, err error) { - result = &v1.CephNFS{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephnfses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectrealm.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectrealm.go deleted file mode 100644 index a408ab852..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectrealm.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephObjectRealmsGetter has a method to return a CephObjectRealmInterface. -// A group's client should implement this interface. -type CephObjectRealmsGetter interface { - CephObjectRealms(namespace string) CephObjectRealmInterface -} - -// CephObjectRealmInterface has methods to work with CephObjectRealm resources. -type CephObjectRealmInterface interface { - Create(ctx context.Context, cephObjectRealm *v1.CephObjectRealm, opts metav1.CreateOptions) (*v1.CephObjectRealm, error) - Update(ctx context.Context, cephObjectRealm *v1.CephObjectRealm, opts metav1.UpdateOptions) (*v1.CephObjectRealm, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectRealm, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectRealmList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectRealm, err error) - CephObjectRealmExpansion -} - -// cephObjectRealms implements CephObjectRealmInterface -type cephObjectRealms struct { - client rest.Interface - ns string -} - -// newCephObjectRealms returns a CephObjectRealms -func newCephObjectRealms(c *CephV1Client, namespace string) *cephObjectRealms { - return &cephObjectRealms{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephObjectRealm, and returns the corresponding cephObjectRealm object, and an error if there is any. -func (c *cephObjectRealms) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectRealm, err error) { - result = &v1.CephObjectRealm{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectrealms"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephObjectRealms that match those selectors. 
-func (c *cephObjectRealms) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectRealmList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephObjectRealmList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectrealms"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephObjectRealms. -func (c *cephObjectRealms) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephobjectrealms"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephObjectRealm and creates it. Returns the server's representation of the cephObjectRealm, and an error, if there is any. -func (c *cephObjectRealms) Create(ctx context.Context, cephObjectRealm *v1.CephObjectRealm, opts metav1.CreateOptions) (result *v1.CephObjectRealm, err error) { - result = &v1.CephObjectRealm{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephobjectrealms"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectRealm). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephObjectRealm and updates it. Returns the server's representation of the cephObjectRealm, and an error, if there is any. -func (c *cephObjectRealms) Update(ctx context.Context, cephObjectRealm *v1.CephObjectRealm, opts metav1.UpdateOptions) (result *v1.CephObjectRealm, err error) { - result = &v1.CephObjectRealm{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephobjectrealms"). - Name(cephObjectRealm.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectRealm). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephObjectRealm and deletes it. Returns an error if one occurs. -func (c *cephObjectRealms) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectrealms"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephObjectRealms) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectrealms"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephObjectRealm. -func (c *cephObjectRealms) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectRealm, err error) { - result = &v1.CephObjectRealm{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephobjectrealms"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstore.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstore.go deleted file mode 100644 index 7b3156363..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstore.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephObjectStoresGetter has a method to return a CephObjectStoreInterface. -// A group's client should implement this interface. -type CephObjectStoresGetter interface { - CephObjectStores(namespace string) CephObjectStoreInterface -} - -// CephObjectStoreInterface has methods to work with CephObjectStore resources. -type CephObjectStoreInterface interface { - Create(ctx context.Context, cephObjectStore *v1.CephObjectStore, opts metav1.CreateOptions) (*v1.CephObjectStore, error) - Update(ctx context.Context, cephObjectStore *v1.CephObjectStore, opts metav1.UpdateOptions) (*v1.CephObjectStore, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectStore, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectStoreList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectStore, err error) - CephObjectStoreExpansion -} - -// cephObjectStores implements CephObjectStoreInterface -type cephObjectStores struct { - client rest.Interface - ns string -} - -// newCephObjectStores returns a CephObjectStores -func newCephObjectStores(c *CephV1Client, namespace string) *cephObjectStores { - return &cephObjectStores{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephObjectStore, and returns the corresponding cephObjectStore object, and an error if there is any. -func (c *cephObjectStores) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectStore, err error) { - result = &v1.CephObjectStore{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectstores"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephObjectStores that match those selectors. 
-func (c *cephObjectStores) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectStoreList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephObjectStoreList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectstores"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephObjectStores. -func (c *cephObjectStores) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephobjectstores"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephObjectStore and creates it. Returns the server's representation of the cephObjectStore, and an error, if there is any. -func (c *cephObjectStores) Create(ctx context.Context, cephObjectStore *v1.CephObjectStore, opts metav1.CreateOptions) (result *v1.CephObjectStore, err error) { - result = &v1.CephObjectStore{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephobjectstores"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectStore). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephObjectStore and updates it. Returns the server's representation of the cephObjectStore, and an error, if there is any. -func (c *cephObjectStores) Update(ctx context.Context, cephObjectStore *v1.CephObjectStore, opts metav1.UpdateOptions) (result *v1.CephObjectStore, err error) { - result = &v1.CephObjectStore{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephobjectstores"). - Name(cephObjectStore.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectStore). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephObjectStore and deletes it. Returns an error if one occurs. -func (c *cephObjectStores) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectstores"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephObjectStores) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectstores"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephObjectStore. -func (c *cephObjectStores) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectStore, err error) { - result = &v1.CephObjectStore{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephobjectstores"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstoreuser.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstoreuser.go deleted file mode 100644 index 69e929c27..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstoreuser.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephObjectStoreUsersGetter has a method to return a CephObjectStoreUserInterface. -// A group's client should implement this interface. -type CephObjectStoreUsersGetter interface { - CephObjectStoreUsers(namespace string) CephObjectStoreUserInterface -} - -// CephObjectStoreUserInterface has methods to work with CephObjectStoreUser resources. -type CephObjectStoreUserInterface interface { - Create(ctx context.Context, cephObjectStoreUser *v1.CephObjectStoreUser, opts metav1.CreateOptions) (*v1.CephObjectStoreUser, error) - Update(ctx context.Context, cephObjectStoreUser *v1.CephObjectStoreUser, opts metav1.UpdateOptions) (*v1.CephObjectStoreUser, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectStoreUser, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectStoreUserList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectStoreUser, err error) - CephObjectStoreUserExpansion -} - -// cephObjectStoreUsers implements CephObjectStoreUserInterface -type cephObjectStoreUsers struct { - client rest.Interface - ns string -} - -// newCephObjectStoreUsers returns a CephObjectStoreUsers -func newCephObjectStoreUsers(c *CephV1Client, namespace string) *cephObjectStoreUsers { - return &cephObjectStoreUsers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephObjectStoreUser, and returns the corresponding cephObjectStoreUser object, and an error if there is any. -func (c *cephObjectStoreUsers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectStoreUser, err error) { - result = &v1.CephObjectStoreUser{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectstoreusers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephObjectStoreUsers that match those selectors. -func (c *cephObjectStoreUsers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectStoreUserList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephObjectStoreUserList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectstoreusers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephObjectStoreUsers. -func (c *cephObjectStoreUsers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephobjectstoreusers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephObjectStoreUser and creates it. Returns the server's representation of the cephObjectStoreUser, and an error, if there is any. -func (c *cephObjectStoreUsers) Create(ctx context.Context, cephObjectStoreUser *v1.CephObjectStoreUser, opts metav1.CreateOptions) (result *v1.CephObjectStoreUser, err error) { - result = &v1.CephObjectStoreUser{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephobjectstoreusers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectStoreUser). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephObjectStoreUser and updates it. Returns the server's representation of the cephObjectStoreUser, and an error, if there is any. -func (c *cephObjectStoreUsers) Update(ctx context.Context, cephObjectStoreUser *v1.CephObjectStoreUser, opts metav1.UpdateOptions) (result *v1.CephObjectStoreUser, err error) { - result = &v1.CephObjectStoreUser{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephobjectstoreusers"). - Name(cephObjectStoreUser.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectStoreUser). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephObjectStoreUser and deletes it. Returns an error if one occurs. -func (c *cephObjectStoreUsers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectstoreusers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephObjectStoreUsers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectstoreusers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephObjectStoreUser. -func (c *cephObjectStoreUsers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectStoreUser, err error) { - result = &v1.CephObjectStoreUser{} - err = c.client.Patch(pt). 
- Namespace(c.ns). - Resource("cephobjectstoreusers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzone.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzone.go deleted file mode 100644 index 315d93c3b..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzone.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephObjectZonesGetter has a method to return a CephObjectZoneInterface. -// A group's client should implement this interface. -type CephObjectZonesGetter interface { - CephObjectZones(namespace string) CephObjectZoneInterface -} - -// CephObjectZoneInterface has methods to work with CephObjectZone resources. -type CephObjectZoneInterface interface { - Create(ctx context.Context, cephObjectZone *v1.CephObjectZone, opts metav1.CreateOptions) (*v1.CephObjectZone, error) - Update(ctx context.Context, cephObjectZone *v1.CephObjectZone, opts metav1.UpdateOptions) (*v1.CephObjectZone, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectZone, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectZoneList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectZone, err error) - CephObjectZoneExpansion -} - -// cephObjectZones implements CephObjectZoneInterface -type cephObjectZones struct { - client rest.Interface - ns string -} - -// newCephObjectZones returns a CephObjectZones -func newCephObjectZones(c *CephV1Client, namespace string) *cephObjectZones { - return &cephObjectZones{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephObjectZone, and returns the corresponding cephObjectZone object, and an error if there is any. -func (c *cephObjectZones) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectZone, err error) { - result = &v1.CephObjectZone{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectzones"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephObjectZones that match those selectors. -func (c *cephObjectZones) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectZoneList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephObjectZoneList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectzones"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephObjectZones. -func (c *cephObjectZones) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephobjectzones"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephObjectZone and creates it. Returns the server's representation of the cephObjectZone, and an error, if there is any. -func (c *cephObjectZones) Create(ctx context.Context, cephObjectZone *v1.CephObjectZone, opts metav1.CreateOptions) (result *v1.CephObjectZone, err error) { - result = &v1.CephObjectZone{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephobjectzones"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectZone). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephObjectZone and updates it. Returns the server's representation of the cephObjectZone, and an error, if there is any. -func (c *cephObjectZones) Update(ctx context.Context, cephObjectZone *v1.CephObjectZone, opts metav1.UpdateOptions) (result *v1.CephObjectZone, err error) { - result = &v1.CephObjectZone{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephobjectzones"). - Name(cephObjectZone.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectZone). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephObjectZone and deletes it. Returns an error if one occurs. -func (c *cephObjectZones) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectzones"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephObjectZones) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectzones"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephObjectZone. -func (c *cephObjectZones) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectZone, err error) { - result = &v1.CephObjectZone{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephobjectzones"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzonegroup.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzonegroup.go deleted file mode 100644 index 11899408a..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzonegroup.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephObjectZoneGroupsGetter has a method to return a CephObjectZoneGroupInterface. -// A group's client should implement this interface. -type CephObjectZoneGroupsGetter interface { - CephObjectZoneGroups(namespace string) CephObjectZoneGroupInterface -} - -// CephObjectZoneGroupInterface has methods to work with CephObjectZoneGroup resources. -type CephObjectZoneGroupInterface interface { - Create(ctx context.Context, cephObjectZoneGroup *v1.CephObjectZoneGroup, opts metav1.CreateOptions) (*v1.CephObjectZoneGroup, error) - Update(ctx context.Context, cephObjectZoneGroup *v1.CephObjectZoneGroup, opts metav1.UpdateOptions) (*v1.CephObjectZoneGroup, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectZoneGroup, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectZoneGroupList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectZoneGroup, err error) - CephObjectZoneGroupExpansion -} - -// cephObjectZoneGroups implements CephObjectZoneGroupInterface -type cephObjectZoneGroups struct { - client rest.Interface - ns string -} - -// newCephObjectZoneGroups returns a CephObjectZoneGroups -func newCephObjectZoneGroups(c *CephV1Client, namespace string) *cephObjectZoneGroups { - return &cephObjectZoneGroups{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephObjectZoneGroup, and returns the corresponding cephObjectZoneGroup object, and an error if there is any. -func (c *cephObjectZoneGroups) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectZoneGroup, err error) { - result = &v1.CephObjectZoneGroup{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectzonegroups"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephObjectZoneGroups that match those selectors. -func (c *cephObjectZoneGroups) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectZoneGroupList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephObjectZoneGroupList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephobjectzonegroups"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephObjectZoneGroups. -func (c *cephObjectZoneGroups) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephobjectzonegroups"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephObjectZoneGroup and creates it. Returns the server's representation of the cephObjectZoneGroup, and an error, if there is any. -func (c *cephObjectZoneGroups) Create(ctx context.Context, cephObjectZoneGroup *v1.CephObjectZoneGroup, opts metav1.CreateOptions) (result *v1.CephObjectZoneGroup, err error) { - result = &v1.CephObjectZoneGroup{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephobjectzonegroups"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectZoneGroup). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephObjectZoneGroup and updates it. Returns the server's representation of the cephObjectZoneGroup, and an error, if there is any. -func (c *cephObjectZoneGroups) Update(ctx context.Context, cephObjectZoneGroup *v1.CephObjectZoneGroup, opts metav1.UpdateOptions) (result *v1.CephObjectZoneGroup, err error) { - result = &v1.CephObjectZoneGroup{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephobjectzonegroups"). - Name(cephObjectZoneGroup.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephObjectZoneGroup). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephObjectZoneGroup and deletes it. Returns an error if one occurs. -func (c *cephObjectZoneGroups) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectzonegroups"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephObjectZoneGroups) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephobjectzonegroups"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephObjectZoneGroup. -func (c *cephObjectZoneGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectZoneGroup, err error) { - result = &v1.CephObjectZoneGroup{} - err = c.client.Patch(pt). 
- Namespace(c.ns). - Resource("cephobjectzonegroups"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephrbdmirror.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephrbdmirror.go deleted file mode 100644 index 524e8a98f..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephrbdmirror.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CephRBDMirrorsGetter has a method to return a CephRBDMirrorInterface. -// A group's client should implement this interface. -type CephRBDMirrorsGetter interface { - CephRBDMirrors(namespace string) CephRBDMirrorInterface -} - -// CephRBDMirrorInterface has methods to work with CephRBDMirror resources. -type CephRBDMirrorInterface interface { - Create(ctx context.Context, cephRBDMirror *v1.CephRBDMirror, opts metav1.CreateOptions) (*v1.CephRBDMirror, error) - Update(ctx context.Context, cephRBDMirror *v1.CephRBDMirror, opts metav1.UpdateOptions) (*v1.CephRBDMirror, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephRBDMirror, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CephRBDMirrorList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephRBDMirror, err error) - CephRBDMirrorExpansion -} - -// cephRBDMirrors implements CephRBDMirrorInterface -type cephRBDMirrors struct { - client rest.Interface - ns string -} - -// newCephRBDMirrors returns a CephRBDMirrors -func newCephRBDMirrors(c *CephV1Client, namespace string) *cephRBDMirrors { - return &cephRBDMirrors{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cephRBDMirror, and returns the corresponding cephRBDMirror object, and an error if there is any. -func (c *cephRBDMirrors) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephRBDMirror, err error) { - result = &v1.CephRBDMirror{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephrbdmirrors"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CephRBDMirrors that match those selectors. -func (c *cephRBDMirrors) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephRBDMirrorList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CephRBDMirrorList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cephrbdmirrors"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cephRBDMirrors. -func (c *cephRBDMirrors) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cephrbdmirrors"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cephRBDMirror and creates it. Returns the server's representation of the cephRBDMirror, and an error, if there is any. -func (c *cephRBDMirrors) Create(ctx context.Context, cephRBDMirror *v1.CephRBDMirror, opts metav1.CreateOptions) (result *v1.CephRBDMirror, err error) { - result = &v1.CephRBDMirror{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cephrbdmirrors"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephRBDMirror). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cephRBDMirror and updates it. Returns the server's representation of the cephRBDMirror, and an error, if there is any. -func (c *cephRBDMirrors) Update(ctx context.Context, cephRBDMirror *v1.CephRBDMirror, opts metav1.UpdateOptions) (result *v1.CephRBDMirror, err error) { - result = &v1.CephRBDMirror{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cephrbdmirrors"). - Name(cephRBDMirror.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cephRBDMirror). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cephRBDMirror and deletes it. Returns an error if one occurs. -func (c *cephRBDMirrors) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cephrbdmirrors"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cephRBDMirrors) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cephrbdmirrors"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cephRBDMirror. -func (c *cephRBDMirrors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephRBDMirror, err error) { - result = &v1.CephRBDMirror{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cephrbdmirrors"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/doc.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/doc.go deleted file mode 100644 index 3af5d054f..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/doc.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go deleted file mode 100644 index afbe03dcd..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCephV1 struct { - *testing.Fake -} - -func (c *FakeCephV1) CephBlockPools(namespace string) v1.CephBlockPoolInterface { - return &FakeCephBlockPools{c, namespace} -} - -func (c *FakeCephV1) CephClients(namespace string) v1.CephClientInterface { - return &FakeCephClients{c, namespace} -} - -func (c *FakeCephV1) CephClusters(namespace string) v1.CephClusterInterface { - return &FakeCephClusters{c, namespace} -} - -func (c *FakeCephV1) CephFilesystems(namespace string) v1.CephFilesystemInterface { - return &FakeCephFilesystems{c, namespace} -} - -func (c *FakeCephV1) CephFilesystemMirrors(namespace string) v1.CephFilesystemMirrorInterface { - return &FakeCephFilesystemMirrors{c, namespace} -} - -func (c *FakeCephV1) CephNFSes(namespace string) v1.CephNFSInterface { - return &FakeCephNFSes{c, namespace} -} - -func (c *FakeCephV1) CephObjectRealms(namespace string) v1.CephObjectRealmInterface { - return &FakeCephObjectRealms{c, namespace} -} - -func (c *FakeCephV1) CephObjectStores(namespace string) v1.CephObjectStoreInterface { - return &FakeCephObjectStores{c, namespace} -} - -func (c *FakeCephV1) CephObjectStoreUsers(namespace string) v1.CephObjectStoreUserInterface { - return &FakeCephObjectStoreUsers{c, namespace} -} - -func (c *FakeCephV1) CephObjectZones(namespace string) v1.CephObjectZoneInterface { - return &FakeCephObjectZones{c, namespace} -} - -func (c *FakeCephV1) CephObjectZoneGroups(namespace string) v1.CephObjectZoneGroupInterface { - return &FakeCephObjectZoneGroups{c, namespace} -} - -func (c *FakeCephV1) CephRBDMirrors(namespace string) v1.CephRBDMirrorInterface { - return &FakeCephRBDMirrors{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCephV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephblockpool.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephblockpool.go deleted file mode 100644 index 991b9b293..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephblockpool.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephBlockPools implements CephBlockPoolInterface -type FakeCephBlockPools struct { - Fake *FakeCephV1 - ns string -} - -var cephblockpoolsResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephblockpools"} - -var cephblockpoolsKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephBlockPool"} - -// Get takes name of the cephBlockPool, and returns the corresponding cephBlockPool object, and an error if there is any. -func (c *FakeCephBlockPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephBlockPool, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephblockpoolsResource, c.ns, name), &cephrookiov1.CephBlockPool{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephBlockPool), err -} - -// List takes label and field selectors, and returns the list of CephBlockPools that match those selectors. -func (c *FakeCephBlockPools) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephBlockPoolList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephblockpoolsResource, cephblockpoolsKind, c.ns, opts), &cephrookiov1.CephBlockPoolList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephBlockPoolList{ListMeta: obj.(*cephrookiov1.CephBlockPoolList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephBlockPoolList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephBlockPools. -func (c *FakeCephBlockPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephblockpoolsResource, c.ns, opts)) - -} - -// Create takes the representation of a cephBlockPool and creates it. Returns the server's representation of the cephBlockPool, and an error, if there is any. -func (c *FakeCephBlockPools) Create(ctx context.Context, cephBlockPool *cephrookiov1.CephBlockPool, opts v1.CreateOptions) (result *cephrookiov1.CephBlockPool, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephblockpoolsResource, c.ns, cephBlockPool), &cephrookiov1.CephBlockPool{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephBlockPool), err -} - -// Update takes the representation of a cephBlockPool and updates it. Returns the server's representation of the cephBlockPool, and an error, if there is any. -func (c *FakeCephBlockPools) Update(ctx context.Context, cephBlockPool *cephrookiov1.CephBlockPool, opts v1.UpdateOptions) (result *cephrookiov1.CephBlockPool, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephblockpoolsResource, c.ns, cephBlockPool), &cephrookiov1.CephBlockPool{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephBlockPool), err -} - -// Delete takes name of the cephBlockPool and deletes it. Returns an error if one occurs. 
-func (c *FakeCephBlockPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephblockpoolsResource, c.ns, name), &cephrookiov1.CephBlockPool{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephBlockPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephblockpoolsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephBlockPoolList{}) - return err -} - -// Patch applies the patch and returns the patched cephBlockPool. -func (c *FakeCephBlockPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephBlockPool, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephblockpoolsResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephBlockPool{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephBlockPool), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephclient.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephclient.go deleted file mode 100644 index 963842e3f..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephclient.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephClients implements CephClientInterface -type FakeCephClients struct { - Fake *FakeCephV1 - ns string -} - -var cephclientsResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephclients"} - -var cephclientsKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephClient"} - -// Get takes name of the cephClient, and returns the corresponding cephClient object, and an error if there is any. -func (c *FakeCephClients) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephClient, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephclientsResource, c.ns, name), &cephrookiov1.CephClient{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephClient), err -} - -// List takes label and field selectors, and returns the list of CephClients that match those selectors. -func (c *FakeCephClients) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephClientList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(cephclientsResource, cephclientsKind, c.ns, opts), &cephrookiov1.CephClientList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephClientList{ListMeta: obj.(*cephrookiov1.CephClientList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephClientList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephClients. -func (c *FakeCephClients) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephclientsResource, c.ns, opts)) - -} - -// Create takes the representation of a cephClient and creates it. Returns the server's representation of the cephClient, and an error, if there is any. -func (c *FakeCephClients) Create(ctx context.Context, cephClient *cephrookiov1.CephClient, opts v1.CreateOptions) (result *cephrookiov1.CephClient, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephclientsResource, c.ns, cephClient), &cephrookiov1.CephClient{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephClient), err -} - -// Update takes the representation of a cephClient and updates it. Returns the server's representation of the cephClient, and an error, if there is any. -func (c *FakeCephClients) Update(ctx context.Context, cephClient *cephrookiov1.CephClient, opts v1.UpdateOptions) (result *cephrookiov1.CephClient, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephclientsResource, c.ns, cephClient), &cephrookiov1.CephClient{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephClient), err -} - -// Delete takes name of the cephClient and deletes it. Returns an error if one occurs. -func (c *FakeCephClients) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephclientsResource, c.ns, name), &cephrookiov1.CephClient{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephClients) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephclientsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephClientList{}) - return err -} - -// Patch applies the patch and returns the patched cephClient. -func (c *FakeCephClients) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephClient, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephclientsResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephClient{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephClient), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephcluster.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephcluster.go deleted file mode 100644 index f30bcf70a..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephcluster.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephClusters implements CephClusterInterface -type FakeCephClusters struct { - Fake *FakeCephV1 - ns string -} - -var cephclustersResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephclusters"} - -var cephclustersKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephCluster"} - -// Get takes name of the cephCluster, and returns the corresponding cephCluster object, and an error if there is any. -func (c *FakeCephClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephCluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephclustersResource, c.ns, name), &cephrookiov1.CephCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephCluster), err -} - -// List takes label and field selectors, and returns the list of CephClusters that match those selectors. -func (c *FakeCephClusters) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephClusterList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephclustersResource, cephclustersKind, c.ns, opts), &cephrookiov1.CephClusterList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephClusterList{ListMeta: obj.(*cephrookiov1.CephClusterList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephClusterList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephClusters. -func (c *FakeCephClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephclustersResource, c.ns, opts)) - -} - -// Create takes the representation of a cephCluster and creates it. Returns the server's representation of the cephCluster, and an error, if there is any. -func (c *FakeCephClusters) Create(ctx context.Context, cephCluster *cephrookiov1.CephCluster, opts v1.CreateOptions) (result *cephrookiov1.CephCluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephclustersResource, c.ns, cephCluster), &cephrookiov1.CephCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephCluster), err -} - -// Update takes the representation of a cephCluster and updates it. Returns the server's representation of the cephCluster, and an error, if there is any. 
-func (c *FakeCephClusters) Update(ctx context.Context, cephCluster *cephrookiov1.CephCluster, opts v1.UpdateOptions) (result *cephrookiov1.CephCluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephclustersResource, c.ns, cephCluster), &cephrookiov1.CephCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephCluster), err -} - -// Delete takes name of the cephCluster and deletes it. Returns an error if one occurs. -func (c *FakeCephClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephclustersResource, c.ns, name), &cephrookiov1.CephCluster{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephclustersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephClusterList{}) - return err -} - -// Patch applies the patch and returns the patched cephCluster. -func (c *FakeCephClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephCluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephclustersResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephCluster), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystem.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystem.go deleted file mode 100644 index 0f998e3fc..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystem.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephFilesystems implements CephFilesystemInterface -type FakeCephFilesystems struct { - Fake *FakeCephV1 - ns string -} - -var cephfilesystemsResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephfilesystems"} - -var cephfilesystemsKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephFilesystem"} - -// Get takes name of the cephFilesystem, and returns the corresponding cephFilesystem object, and an error if there is any. 
-func (c *FakeCephFilesystems) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephFilesystem, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephfilesystemsResource, c.ns, name), &cephrookiov1.CephFilesystem{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephFilesystem), err -} - -// List takes label and field selectors, and returns the list of CephFilesystems that match those selectors. -func (c *FakeCephFilesystems) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephFilesystemList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephfilesystemsResource, cephfilesystemsKind, c.ns, opts), &cephrookiov1.CephFilesystemList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephFilesystemList{ListMeta: obj.(*cephrookiov1.CephFilesystemList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephFilesystemList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephFilesystems. -func (c *FakeCephFilesystems) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephfilesystemsResource, c.ns, opts)) - -} - -// Create takes the representation of a cephFilesystem and creates it. Returns the server's representation of the cephFilesystem, and an error, if there is any. -func (c *FakeCephFilesystems) Create(ctx context.Context, cephFilesystem *cephrookiov1.CephFilesystem, opts v1.CreateOptions) (result *cephrookiov1.CephFilesystem, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephfilesystemsResource, c.ns, cephFilesystem), &cephrookiov1.CephFilesystem{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephFilesystem), err -} - -// Update takes the representation of a cephFilesystem and updates it. Returns the server's representation of the cephFilesystem, and an error, if there is any. -func (c *FakeCephFilesystems) Update(ctx context.Context, cephFilesystem *cephrookiov1.CephFilesystem, opts v1.UpdateOptions) (result *cephrookiov1.CephFilesystem, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephfilesystemsResource, c.ns, cephFilesystem), &cephrookiov1.CephFilesystem{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephFilesystem), err -} - -// Delete takes name of the cephFilesystem and deletes it. Returns an error if one occurs. -func (c *FakeCephFilesystems) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephfilesystemsResource, c.ns, name), &cephrookiov1.CephFilesystem{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephFilesystems) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephfilesystemsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephFilesystemList{}) - return err -} - -// Patch applies the patch and returns the patched cephFilesystem. 
-func (c *FakeCephFilesystems) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephFilesystem, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephfilesystemsResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephFilesystem{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephFilesystem), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystemmirror.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystemmirror.go deleted file mode 100644 index 1394c4556..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystemmirror.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephFilesystemMirrors implements CephFilesystemMirrorInterface -type FakeCephFilesystemMirrors struct { - Fake *FakeCephV1 - ns string -} - -var cephfilesystemmirrorsResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephfilesystemmirrors"} - -var cephfilesystemmirrorsKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephFilesystemMirror"} - -// Get takes name of the cephFilesystemMirror, and returns the corresponding cephFilesystemMirror object, and an error if there is any. -func (c *FakeCephFilesystemMirrors) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephFilesystemMirror, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephfilesystemmirrorsResource, c.ns, name), &cephrookiov1.CephFilesystemMirror{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephFilesystemMirror), err -} - -// List takes label and field selectors, and returns the list of CephFilesystemMirrors that match those selectors. -func (c *FakeCephFilesystemMirrors) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephFilesystemMirrorList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(cephfilesystemmirrorsResource, cephfilesystemmirrorsKind, c.ns, opts), &cephrookiov1.CephFilesystemMirrorList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephFilesystemMirrorList{ListMeta: obj.(*cephrookiov1.CephFilesystemMirrorList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephFilesystemMirrorList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephFilesystemMirrors. -func (c *FakeCephFilesystemMirrors) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephfilesystemmirrorsResource, c.ns, opts)) - -} - -// Create takes the representation of a cephFilesystemMirror and creates it. Returns the server's representation of the cephFilesystemMirror, and an error, if there is any. -func (c *FakeCephFilesystemMirrors) Create(ctx context.Context, cephFilesystemMirror *cephrookiov1.CephFilesystemMirror, opts v1.CreateOptions) (result *cephrookiov1.CephFilesystemMirror, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephfilesystemmirrorsResource, c.ns, cephFilesystemMirror), &cephrookiov1.CephFilesystemMirror{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephFilesystemMirror), err -} - -// Update takes the representation of a cephFilesystemMirror and updates it. Returns the server's representation of the cephFilesystemMirror, and an error, if there is any. -func (c *FakeCephFilesystemMirrors) Update(ctx context.Context, cephFilesystemMirror *cephrookiov1.CephFilesystemMirror, opts v1.UpdateOptions) (result *cephrookiov1.CephFilesystemMirror, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephfilesystemmirrorsResource, c.ns, cephFilesystemMirror), &cephrookiov1.CephFilesystemMirror{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephFilesystemMirror), err -} - -// Delete takes name of the cephFilesystemMirror and deletes it. Returns an error if one occurs. -func (c *FakeCephFilesystemMirrors) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephfilesystemmirrorsResource, c.ns, name), &cephrookiov1.CephFilesystemMirror{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephFilesystemMirrors) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephfilesystemmirrorsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephFilesystemMirrorList{}) - return err -} - -// Patch applies the patch and returns the patched cephFilesystemMirror. -func (c *FakeCephFilesystemMirrors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephFilesystemMirror, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(cephfilesystemmirrorsResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephFilesystemMirror{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephFilesystemMirror), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephnfs.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephnfs.go deleted file mode 100644 index e1059df68..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephnfs.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephNFSes implements CephNFSInterface -type FakeCephNFSes struct { - Fake *FakeCephV1 - ns string -} - -var cephnfsesResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephnfses"} - -var cephnfsesKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephNFS"} - -// Get takes name of the cephNFS, and returns the corresponding cephNFS object, and an error if there is any. -func (c *FakeCephNFSes) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephNFS, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephnfsesResource, c.ns, name), &cephrookiov1.CephNFS{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephNFS), err -} - -// List takes label and field selectors, and returns the list of CephNFSes that match those selectors. -func (c *FakeCephNFSes) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephNFSList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephnfsesResource, cephnfsesKind, c.ns, opts), &cephrookiov1.CephNFSList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephNFSList{ListMeta: obj.(*cephrookiov1.CephNFSList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephNFSList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephNFSes. -func (c *FakeCephNFSes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephnfsesResource, c.ns, opts)) - -} - -// Create takes the representation of a cephNFS and creates it. Returns the server's representation of the cephNFS, and an error, if there is any. 
-func (c *FakeCephNFSes) Create(ctx context.Context, cephNFS *cephrookiov1.CephNFS, opts v1.CreateOptions) (result *cephrookiov1.CephNFS, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephnfsesResource, c.ns, cephNFS), &cephrookiov1.CephNFS{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephNFS), err -} - -// Update takes the representation of a cephNFS and updates it. Returns the server's representation of the cephNFS, and an error, if there is any. -func (c *FakeCephNFSes) Update(ctx context.Context, cephNFS *cephrookiov1.CephNFS, opts v1.UpdateOptions) (result *cephrookiov1.CephNFS, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephnfsesResource, c.ns, cephNFS), &cephrookiov1.CephNFS{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephNFS), err -} - -// Delete takes name of the cephNFS and deletes it. Returns an error if one occurs. -func (c *FakeCephNFSes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephnfsesResource, c.ns, name), &cephrookiov1.CephNFS{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephNFSes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephnfsesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephNFSList{}) - return err -} - -// Patch applies the patch and returns the patched cephNFS. -func (c *FakeCephNFSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephNFS, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephnfsesResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephNFS{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephNFS), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectrealm.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectrealm.go deleted file mode 100644 index 818f00d7d..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectrealm.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephObjectRealms implements CephObjectRealmInterface -type FakeCephObjectRealms struct { - Fake *FakeCephV1 - ns string -} - -var cephobjectrealmsResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephobjectrealms"} - -var cephobjectrealmsKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephObjectRealm"} - -// Get takes name of the cephObjectRealm, and returns the corresponding cephObjectRealm object, and an error if there is any. -func (c *FakeCephObjectRealms) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephObjectRealm, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephobjectrealmsResource, c.ns, name), &cephrookiov1.CephObjectRealm{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectRealm), err -} - -// List takes label and field selectors, and returns the list of CephObjectRealms that match those selectors. -func (c *FakeCephObjectRealms) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephObjectRealmList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephobjectrealmsResource, cephobjectrealmsKind, c.ns, opts), &cephrookiov1.CephObjectRealmList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephObjectRealmList{ListMeta: obj.(*cephrookiov1.CephObjectRealmList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephObjectRealmList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephObjectRealms. -func (c *FakeCephObjectRealms) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephobjectrealmsResource, c.ns, opts)) - -} - -// Create takes the representation of a cephObjectRealm and creates it. Returns the server's representation of the cephObjectRealm, and an error, if there is any. -func (c *FakeCephObjectRealms) Create(ctx context.Context, cephObjectRealm *cephrookiov1.CephObjectRealm, opts v1.CreateOptions) (result *cephrookiov1.CephObjectRealm, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephobjectrealmsResource, c.ns, cephObjectRealm), &cephrookiov1.CephObjectRealm{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectRealm), err -} - -// Update takes the representation of a cephObjectRealm and updates it. Returns the server's representation of the cephObjectRealm, and an error, if there is any. -func (c *FakeCephObjectRealms) Update(ctx context.Context, cephObjectRealm *cephrookiov1.CephObjectRealm, opts v1.UpdateOptions) (result *cephrookiov1.CephObjectRealm, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(cephobjectrealmsResource, c.ns, cephObjectRealm), &cephrookiov1.CephObjectRealm{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectRealm), err -} - -// Delete takes name of the cephObjectRealm and deletes it. Returns an error if one occurs. -func (c *FakeCephObjectRealms) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephobjectrealmsResource, c.ns, name), &cephrookiov1.CephObjectRealm{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephObjectRealms) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephobjectrealmsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephObjectRealmList{}) - return err -} - -// Patch applies the patch and returns the patched cephObjectRealm. -func (c *FakeCephObjectRealms) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephObjectRealm, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephobjectrealmsResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephObjectRealm{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectRealm), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectstore.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectstore.go deleted file mode 100644 index f571e3809..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectstore.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephObjectStores implements CephObjectStoreInterface -type FakeCephObjectStores struct { - Fake *FakeCephV1 - ns string -} - -var cephobjectstoresResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephobjectstores"} - -var cephobjectstoresKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephObjectStore"} - -// Get takes name of the cephObjectStore, and returns the corresponding cephObjectStore object, and an error if there is any. -func (c *FakeCephObjectStores) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephObjectStore, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(cephobjectstoresResource, c.ns, name), &cephrookiov1.CephObjectStore{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectStore), err -} - -// List takes label and field selectors, and returns the list of CephObjectStores that match those selectors. -func (c *FakeCephObjectStores) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephObjectStoreList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephobjectstoresResource, cephobjectstoresKind, c.ns, opts), &cephrookiov1.CephObjectStoreList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephObjectStoreList{ListMeta: obj.(*cephrookiov1.CephObjectStoreList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephObjectStoreList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephObjectStores. -func (c *FakeCephObjectStores) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephobjectstoresResource, c.ns, opts)) - -} - -// Create takes the representation of a cephObjectStore and creates it. Returns the server's representation of the cephObjectStore, and an error, if there is any. -func (c *FakeCephObjectStores) Create(ctx context.Context, cephObjectStore *cephrookiov1.CephObjectStore, opts v1.CreateOptions) (result *cephrookiov1.CephObjectStore, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephobjectstoresResource, c.ns, cephObjectStore), &cephrookiov1.CephObjectStore{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectStore), err -} - -// Update takes the representation of a cephObjectStore and updates it. Returns the server's representation of the cephObjectStore, and an error, if there is any. -func (c *FakeCephObjectStores) Update(ctx context.Context, cephObjectStore *cephrookiov1.CephObjectStore, opts v1.UpdateOptions) (result *cephrookiov1.CephObjectStore, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephobjectstoresResource, c.ns, cephObjectStore), &cephrookiov1.CephObjectStore{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectStore), err -} - -// Delete takes name of the cephObjectStore and deletes it. Returns an error if one occurs. -func (c *FakeCephObjectStores) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephobjectstoresResource, c.ns, name), &cephrookiov1.CephObjectStore{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephObjectStores) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephobjectstoresResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephObjectStoreList{}) - return err -} - -// Patch applies the patch and returns the patched cephObjectStore. -func (c *FakeCephObjectStores) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephObjectStore, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(cephobjectstoresResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephObjectStore{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectStore), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectstoreuser.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectstoreuser.go deleted file mode 100644 index ebe8f91e5..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectstoreuser.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephObjectStoreUsers implements CephObjectStoreUserInterface -type FakeCephObjectStoreUsers struct { - Fake *FakeCephV1 - ns string -} - -var cephobjectstoreusersResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephobjectstoreusers"} - -var cephobjectstoreusersKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephObjectStoreUser"} - -// Get takes name of the cephObjectStoreUser, and returns the corresponding cephObjectStoreUser object, and an error if there is any. -func (c *FakeCephObjectStoreUsers) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephObjectStoreUser, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephobjectstoreusersResource, c.ns, name), &cephrookiov1.CephObjectStoreUser{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectStoreUser), err -} - -// List takes label and field selectors, and returns the list of CephObjectStoreUsers that match those selectors. -func (c *FakeCephObjectStoreUsers) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephObjectStoreUserList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephobjectstoreusersResource, cephobjectstoreusersKind, c.ns, opts), &cephrookiov1.CephObjectStoreUserList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephObjectStoreUserList{ListMeta: obj.(*cephrookiov1.CephObjectStoreUserList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephObjectStoreUserList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephObjectStoreUsers. 
-func (c *FakeCephObjectStoreUsers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephobjectstoreusersResource, c.ns, opts)) - -} - -// Create takes the representation of a cephObjectStoreUser and creates it. Returns the server's representation of the cephObjectStoreUser, and an error, if there is any. -func (c *FakeCephObjectStoreUsers) Create(ctx context.Context, cephObjectStoreUser *cephrookiov1.CephObjectStoreUser, opts v1.CreateOptions) (result *cephrookiov1.CephObjectStoreUser, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephobjectstoreusersResource, c.ns, cephObjectStoreUser), &cephrookiov1.CephObjectStoreUser{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectStoreUser), err -} - -// Update takes the representation of a cephObjectStoreUser and updates it. Returns the server's representation of the cephObjectStoreUser, and an error, if there is any. -func (c *FakeCephObjectStoreUsers) Update(ctx context.Context, cephObjectStoreUser *cephrookiov1.CephObjectStoreUser, opts v1.UpdateOptions) (result *cephrookiov1.CephObjectStoreUser, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephobjectstoreusersResource, c.ns, cephObjectStoreUser), &cephrookiov1.CephObjectStoreUser{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectStoreUser), err -} - -// Delete takes name of the cephObjectStoreUser and deletes it. Returns an error if one occurs. -func (c *FakeCephObjectStoreUsers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephobjectstoreusersResource, c.ns, name), &cephrookiov1.CephObjectStoreUser{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephObjectStoreUsers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephobjectstoreusersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephObjectStoreUserList{}) - return err -} - -// Patch applies the patch and returns the patched cephObjectStoreUser. -func (c *FakeCephObjectStoreUsers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephObjectStoreUser, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephobjectstoreusersResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephObjectStoreUser{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectStoreUser), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectzone.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectzone.go deleted file mode 100644 index 6767576c0..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectzone.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephObjectZones implements CephObjectZoneInterface -type FakeCephObjectZones struct { - Fake *FakeCephV1 - ns string -} - -var cephobjectzonesResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephobjectzones"} - -var cephobjectzonesKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephObjectZone"} - -// Get takes name of the cephObjectZone, and returns the corresponding cephObjectZone object, and an error if there is any. -func (c *FakeCephObjectZones) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephObjectZone, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephobjectzonesResource, c.ns, name), &cephrookiov1.CephObjectZone{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectZone), err -} - -// List takes label and field selectors, and returns the list of CephObjectZones that match those selectors. -func (c *FakeCephObjectZones) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephObjectZoneList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephobjectzonesResource, cephobjectzonesKind, c.ns, opts), &cephrookiov1.CephObjectZoneList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephObjectZoneList{ListMeta: obj.(*cephrookiov1.CephObjectZoneList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephObjectZoneList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephObjectZones. -func (c *FakeCephObjectZones) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephobjectzonesResource, c.ns, opts)) - -} - -// Create takes the representation of a cephObjectZone and creates it. Returns the server's representation of the cephObjectZone, and an error, if there is any. -func (c *FakeCephObjectZones) Create(ctx context.Context, cephObjectZone *cephrookiov1.CephObjectZone, opts v1.CreateOptions) (result *cephrookiov1.CephObjectZone, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephobjectzonesResource, c.ns, cephObjectZone), &cephrookiov1.CephObjectZone{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectZone), err -} - -// Update takes the representation of a cephObjectZone and updates it. Returns the server's representation of the cephObjectZone, and an error, if there is any. 
-func (c *FakeCephObjectZones) Update(ctx context.Context, cephObjectZone *cephrookiov1.CephObjectZone, opts v1.UpdateOptions) (result *cephrookiov1.CephObjectZone, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephobjectzonesResource, c.ns, cephObjectZone), &cephrookiov1.CephObjectZone{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectZone), err -} - -// Delete takes name of the cephObjectZone and deletes it. Returns an error if one occurs. -func (c *FakeCephObjectZones) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephobjectzonesResource, c.ns, name), &cephrookiov1.CephObjectZone{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephObjectZones) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephobjectzonesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephObjectZoneList{}) - return err -} - -// Patch applies the patch and returns the patched cephObjectZone. -func (c *FakeCephObjectZones) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephObjectZone, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephobjectzonesResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephObjectZone{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectZone), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectzonegroup.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectzonegroup.go deleted file mode 100644 index 952f3a7fa..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephobjectzonegroup.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephObjectZoneGroups implements CephObjectZoneGroupInterface -type FakeCephObjectZoneGroups struct { - Fake *FakeCephV1 - ns string -} - -var cephobjectzonegroupsResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephobjectzonegroups"} - -var cephobjectzonegroupsKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephObjectZoneGroup"} - -// Get takes name of the cephObjectZoneGroup, and returns the corresponding cephObjectZoneGroup object, and an error if there is any. 
-func (c *FakeCephObjectZoneGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephObjectZoneGroup, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephobjectzonegroupsResource, c.ns, name), &cephrookiov1.CephObjectZoneGroup{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectZoneGroup), err -} - -// List takes label and field selectors, and returns the list of CephObjectZoneGroups that match those selectors. -func (c *FakeCephObjectZoneGroups) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephObjectZoneGroupList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cephobjectzonegroupsResource, cephobjectzonegroupsKind, c.ns, opts), &cephrookiov1.CephObjectZoneGroupList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephObjectZoneGroupList{ListMeta: obj.(*cephrookiov1.CephObjectZoneGroupList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephObjectZoneGroupList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephObjectZoneGroups. -func (c *FakeCephObjectZoneGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephobjectzonegroupsResource, c.ns, opts)) - -} - -// Create takes the representation of a cephObjectZoneGroup and creates it. Returns the server's representation of the cephObjectZoneGroup, and an error, if there is any. -func (c *FakeCephObjectZoneGroups) Create(ctx context.Context, cephObjectZoneGroup *cephrookiov1.CephObjectZoneGroup, opts v1.CreateOptions) (result *cephrookiov1.CephObjectZoneGroup, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephobjectzonegroupsResource, c.ns, cephObjectZoneGroup), &cephrookiov1.CephObjectZoneGroup{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectZoneGroup), err -} - -// Update takes the representation of a cephObjectZoneGroup and updates it. Returns the server's representation of the cephObjectZoneGroup, and an error, if there is any. -func (c *FakeCephObjectZoneGroups) Update(ctx context.Context, cephObjectZoneGroup *cephrookiov1.CephObjectZoneGroup, opts v1.UpdateOptions) (result *cephrookiov1.CephObjectZoneGroup, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephobjectzonegroupsResource, c.ns, cephObjectZoneGroup), &cephrookiov1.CephObjectZoneGroup{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectZoneGroup), err -} - -// Delete takes name of the cephObjectZoneGroup and deletes it. Returns an error if one occurs. -func (c *FakeCephObjectZoneGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephobjectzonegroupsResource, c.ns, name), &cephrookiov1.CephObjectZoneGroup{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeCephObjectZoneGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephobjectzonegroupsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephObjectZoneGroupList{}) - return err -} - -// Patch applies the patch and returns the patched cephObjectZoneGroup. -func (c *FakeCephObjectZoneGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephObjectZoneGroup, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephobjectzonegroupsResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephObjectZoneGroup{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephObjectZoneGroup), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephrbdmirror.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephrbdmirror.go deleted file mode 100644 index e55fd4fab..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephrbdmirror.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeCephRBDMirrors implements CephRBDMirrorInterface -type FakeCephRBDMirrors struct { - Fake *FakeCephV1 - ns string -} - -var cephrbdmirrorsResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephrbdmirrors"} - -var cephrbdmirrorsKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephRBDMirror"} - -// Get takes name of the cephRBDMirror, and returns the corresponding cephRBDMirror object, and an error if there is any. -func (c *FakeCephRBDMirrors) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephRBDMirror, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cephrbdmirrorsResource, c.ns, name), &cephrookiov1.CephRBDMirror{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephRBDMirror), err -} - -// List takes label and field selectors, and returns the list of CephRBDMirrors that match those selectors. -func (c *FakeCephRBDMirrors) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephRBDMirrorList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(cephrbdmirrorsResource, cephrbdmirrorsKind, c.ns, opts), &cephrookiov1.CephRBDMirrorList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &cephrookiov1.CephRBDMirrorList{ListMeta: obj.(*cephrookiov1.CephRBDMirrorList).ListMeta} - for _, item := range obj.(*cephrookiov1.CephRBDMirrorList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cephRBDMirrors. -func (c *FakeCephRBDMirrors) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cephrbdmirrorsResource, c.ns, opts)) - -} - -// Create takes the representation of a cephRBDMirror and creates it. Returns the server's representation of the cephRBDMirror, and an error, if there is any. -func (c *FakeCephRBDMirrors) Create(ctx context.Context, cephRBDMirror *cephrookiov1.CephRBDMirror, opts v1.CreateOptions) (result *cephrookiov1.CephRBDMirror, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cephrbdmirrorsResource, c.ns, cephRBDMirror), &cephrookiov1.CephRBDMirror{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephRBDMirror), err -} - -// Update takes the representation of a cephRBDMirror and updates it. Returns the server's representation of the cephRBDMirror, and an error, if there is any. -func (c *FakeCephRBDMirrors) Update(ctx context.Context, cephRBDMirror *cephrookiov1.CephRBDMirror, opts v1.UpdateOptions) (result *cephrookiov1.CephRBDMirror, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cephrbdmirrorsResource, c.ns, cephRBDMirror), &cephrookiov1.CephRBDMirror{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephRBDMirror), err -} - -// Delete takes name of the cephRBDMirror and deletes it. Returns an error if one occurs. -func (c *FakeCephRBDMirrors) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(cephrbdmirrorsResource, c.ns, name), &cephrookiov1.CephRBDMirror{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCephRBDMirrors) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cephrbdmirrorsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &cephrookiov1.CephRBDMirrorList{}) - return err -} - -// Patch applies the patch and returns the patched cephRBDMirror. -func (c *FakeCephRBDMirrors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephRBDMirror, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cephrbdmirrorsResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephRBDMirror{}) - - if obj == nil { - return nil, err - } - return obj.(*cephrookiov1.CephRBDMirror), err -} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go deleted file mode 100644 index 675dadf45..000000000 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type CephBlockPoolExpansion interface{} - -type CephClientExpansion interface{} - -type CephClusterExpansion interface{} - -type CephFilesystemExpansion interface{} - -type CephFilesystemMirrorExpansion interface{} - -type CephNFSExpansion interface{} - -type CephObjectRealmExpansion interface{} - -type CephObjectStoreExpansion interface{} - -type CephObjectStoreUserExpansion interface{} - -type CephObjectZoneExpansion interface{} - -type CephObjectZoneGroupExpansion interface{} - -type CephRBDMirrorExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/doc.go deleted file mode 100644 index df51baa4d..000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. 
-package fake diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfs.rook.io_client.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfs.rook.io_client.go deleted file mode 100644 index 547010476..000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfs.rook.io_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeNfsV1alpha1 struct { - *testing.Fake -} - -func (c *FakeNfsV1alpha1) NFSServers(namespace string) v1alpha1.NFSServerInterface { - return &FakeNFSServers{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeNfsV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfsserver.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfsserver.go deleted file mode 100644 index c17661995..000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfsserver.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeNFSServers implements NFSServerInterface -type FakeNFSServers struct { - Fake *FakeNfsV1alpha1 - ns string -} - -var nfsserversResource = schema.GroupVersionResource{Group: "nfs.rook.io", Version: "v1alpha1", Resource: "nfsservers"} - -var nfsserversKind = schema.GroupVersionKind{Group: "nfs.rook.io", Version: "v1alpha1", Kind: "NFSServer"} - -// Get takes name of the nFSServer, and returns the corresponding nFSServer object, and an error if there is any. 
-func (c *FakeNFSServers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NFSServer, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(nfsserversResource, c.ns, name), &v1alpha1.NFSServer{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NFSServer), err -} - -// List takes label and field selectors, and returns the list of NFSServers that match those selectors. -func (c *FakeNFSServers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NFSServerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(nfsserversResource, nfsserversKind, c.ns, opts), &v1alpha1.NFSServerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.NFSServerList{ListMeta: obj.(*v1alpha1.NFSServerList).ListMeta} - for _, item := range obj.(*v1alpha1.NFSServerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested nFSServers. -func (c *FakeNFSServers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(nfsserversResource, c.ns, opts)) - -} - -// Create takes the representation of a nFSServer and creates it. Returns the server's representation of the nFSServer, and an error, if there is any. -func (c *FakeNFSServers) Create(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.CreateOptions) (result *v1alpha1.NFSServer, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(nfsserversResource, c.ns, nFSServer), &v1alpha1.NFSServer{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NFSServer), err -} - -// Update takes the representation of a nFSServer and updates it. Returns the server's representation of the nFSServer, and an error, if there is any. -func (c *FakeNFSServers) Update(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.UpdateOptions) (result *v1alpha1.NFSServer, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(nfsserversResource, c.ns, nFSServer), &v1alpha1.NFSServer{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NFSServer), err -} - -// Delete takes name of the nFSServer and deletes it. Returns an error if one occurs. -func (c *FakeNFSServers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(nfsserversResource, c.ns, name), &v1alpha1.NFSServer{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNFSServers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(nfsserversResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.NFSServerList{}) - return err -} - -// Patch applies the patch and returns the patched nFSServer. -func (c *FakeNFSServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NFSServer, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(nfsserversResource, c.ns, name, pt, data, subresources...), &v1alpha1.NFSServer{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NFSServer), err -} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/generated_expansion.go deleted file mode 100644 index 39cd4986f..000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type NFSServerExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfs.rook.io_client.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfs.rook.io_client.go deleted file mode 100644 index 53ab90449..000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfs.rook.io_client.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type NfsV1alpha1Interface interface { - RESTClient() rest.Interface - NFSServersGetter -} - -// NfsV1alpha1Client is used to interact with features provided by the nfs.rook.io group. -type NfsV1alpha1Client struct { - restClient rest.Interface -} - -func (c *NfsV1alpha1Client) NFSServers(namespace string) NFSServerInterface { - return newNFSServers(c, namespace) -} - -// NewForConfig creates a new NfsV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*NfsV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &NfsV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new NfsV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NfsV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new NfsV1alpha1Client for the given RESTClient. 
-func New(c rest.Interface) *NfsV1alpha1Client { - return &NfsV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *NfsV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfsserver.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfsserver.go deleted file mode 100644 index 8cbfd05a9..000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfsserver.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// NFSServersGetter has a method to return a NFSServerInterface. -// A group's client should implement this interface. -type NFSServersGetter interface { - NFSServers(namespace string) NFSServerInterface -} - -// NFSServerInterface has methods to work with NFSServer resources. -type NFSServerInterface interface { - Create(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.CreateOptions) (*v1alpha1.NFSServer, error) - Update(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.UpdateOptions) (*v1alpha1.NFSServer, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NFSServer, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NFSServerList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NFSServer, err error) - NFSServerExpansion -} - -// nFSServers implements NFSServerInterface -type nFSServers struct { - client rest.Interface - ns string -} - -// newNFSServers returns a NFSServers -func newNFSServers(c *NfsV1alpha1Client, namespace string) *nFSServers { - return &nFSServers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the nFSServer, and returns the corresponding nFSServer object, and an error if there is any. 
-func (c *nFSServers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NFSServer, err error) { - result = &v1alpha1.NFSServer{} - err = c.client.Get(). - Namespace(c.ns). - Resource("nfsservers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NFSServers that match those selectors. -func (c *nFSServers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NFSServerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.NFSServerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("nfsservers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nFSServers. -func (c *nFSServers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("nfsservers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a nFSServer and creates it. Returns the server's representation of the nFSServer, and an error, if there is any. -func (c *nFSServers) Create(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.CreateOptions) (result *v1alpha1.NFSServer, err error) { - result = &v1alpha1.NFSServer{} - err = c.client.Post(). - Namespace(c.ns). - Resource("nfsservers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(nFSServer). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a nFSServer and updates it. Returns the server's representation of the nFSServer, and an error, if there is any. -func (c *nFSServers) Update(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.UpdateOptions) (result *v1alpha1.NFSServer, err error) { - result = &v1alpha1.NFSServer{} - err = c.client.Put(). - Namespace(c.ns). - Resource("nfsservers"). - Name(nFSServer.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(nFSServer). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the nFSServer and deletes it. Returns an error if one occurs. -func (c *nFSServers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("nfsservers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nFSServers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("nfsservers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched nFSServer. -func (c *nFSServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NFSServer, err error) { - result = &v1alpha1.NFSServer{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("nfsservers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake/fake_rook.io_client.go b/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake/fake_rook.io_client.go index c3b6fa86c..53e9b0756 100644 --- a/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake/fake_rook.io_client.go +++ b/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake/fake_rook.io_client.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - v1alpha2 "github.com/rook/rook/pkg/client/clientset/versioned/typed/rook.io/v1alpha2" + v1alpha2 "github.com/rook/cassandra/pkg/client/clientset/versioned/typed/rook.io/v1alpha2" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) diff --git a/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake/fake_volume.go b/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake/fake_volume.go index e1f5df148..cc4168904 100644 --- a/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake/fake_volume.go +++ b/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake/fake_volume.go @@ -21,7 +21,7 @@ package fake import ( "context" - v1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" + v1alpha2 "github.com/rook/cassandra/pkg/apis/rook.io/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/rook.io_client.go b/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/rook.io_client.go index 8ec4e9ba4..97821f78d 100644 --- a/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/rook.io_client.go +++ b/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/rook.io_client.go @@ -19,8 +19,8 @@ limitations under the License. package v1alpha2 import ( - v1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + v1alpha2 "github.com/rook/cassandra/pkg/apis/rook.io/v1alpha2" + "github.com/rook/cassandra/pkg/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) diff --git a/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/volume.go b/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/volume.go index a6df3abf4..a4db76310 100644 --- a/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/volume.go +++ b/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/volume.go @@ -22,8 +22,8 @@ import ( "context" "time" - v1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + v1alpha2 "github.com/rook/cassandra/pkg/apis/rook.io/v1alpha2" + scheme "github.com/rook/cassandra/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/client/informers/externalversions/cassandra.rook.io/interface.go b/pkg/client/informers/externalversions/cassandra.rook.io/interface.go index e8a00018a..ea49b892f 100644 --- a/pkg/client/informers/externalversions/cassandra.rook.io/interface.go +++ b/pkg/client/informers/externalversions/cassandra.rook.io/interface.go @@ -19,8 +19,8 @@ limitations under the License. 
package cassandra import ( - v1alpha1 "github.com/rook/rook/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/rook/cassandra/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1" + internalinterfaces "github.com/rook/cassandra/pkg/client/informers/externalversions/internalinterfaces" ) // Interface provides access to each of this group's versions. diff --git a/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/cluster.go b/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/cluster.go index 368e176ce..539afff97 100644 --- a/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/cluster.go +++ b/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/cluster.go @@ -22,10 +22,10 @@ import ( "context" time "time" - cassandrarookiov1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/rook/rook/pkg/client/listers/cassandra.rook.io/v1alpha1" + cassandrarookiov1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + versioned "github.com/rook/cassandra/pkg/client/clientset/versioned" + internalinterfaces "github.com/rook/cassandra/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/rook/cassandra/pkg/client/listers/cassandra.rook.io/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/interface.go b/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/interface.go index f5556f182..625dc7f22 100644 --- a/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/interface.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" + internalinterfaces "github.com/rook/cassandra/pkg/client/informers/externalversions/internalinterfaces" ) // Interface provides access to all the informers in this group version. diff --git a/pkg/client/informers/externalversions/ceph.rook.io/interface.go b/pkg/client/informers/externalversions/ceph.rook.io/interface.go deleted file mode 100644 index c2800a929..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package ceph - -import ( - v1 "github.com/rook/rook/pkg/client/informers/externalversions/ceph.rook.io/v1" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1 provides access to shared informers for resources in V1. - V1() v1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1 returns a new v1.Interface. -func (g *group) V1() v1.Interface { - return v1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephblockpool.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephblockpool.go deleted file mode 100644 index 63f4f6467..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephblockpool.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephBlockPoolInformer provides access to a shared informer and lister for -// CephBlockPools. -type CephBlockPoolInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephBlockPoolLister -} - -type cephBlockPoolInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephBlockPoolInformer constructs a new informer for CephBlockPool type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephBlockPoolInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephBlockPoolInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephBlockPoolInformer constructs a new informer for CephBlockPool type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. 
This reduces memory footprint and number of connections to the server. -func NewFilteredCephBlockPoolInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephBlockPools(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephBlockPools(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephBlockPool{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephBlockPoolInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephBlockPoolInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephBlockPoolInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephBlockPool{}, f.defaultInformer) -} - -func (f *cephBlockPoolInformer) Lister() v1.CephBlockPoolLister { - return v1.NewCephBlockPoolLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephclient.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephclient.go deleted file mode 100644 index 6f07a1659..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephclient.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephClientInformer provides access to a shared informer and lister for -// CephClients. -type CephClientInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephClientLister -} - -type cephClientInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephClientInformer constructs a new informer for CephClient type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewCephClientInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephClientInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephClientInformer constructs a new informer for CephClient type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCephClientInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephClients(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephClients(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephClient{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephClientInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephClientInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephClientInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephClient{}, f.defaultInformer) -} - -func (f *cephClientInformer) Lister() v1.CephClientLister { - return v1.NewCephClientLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephcluster.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephcluster.go deleted file mode 100644 index 23cf65440..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephcluster.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephClusterInformer provides access to a shared informer and lister for -// CephClusters. 
-type CephClusterInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephClusterLister -} - -type cephClusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephClusterInformer constructs a new informer for CephCluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephClusterInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephClusterInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephClusterInformer constructs a new informer for CephCluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCephClusterInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephClusters(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephClusters(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephCluster{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephClusterInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephClusterInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephClusterInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephCluster{}, f.defaultInformer) -} - -func (f *cephClusterInformer) Lister() v1.CephClusterLister { - return v1.NewCephClusterLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystem.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystem.go deleted file mode 100644 index a08aebb93..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystem.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephFilesystemInformer provides access to a shared informer and lister for -// CephFilesystems. -type CephFilesystemInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephFilesystemLister -} - -type cephFilesystemInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephFilesystemInformer constructs a new informer for CephFilesystem type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephFilesystemInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephFilesystemInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephFilesystemInformer constructs a new informer for CephFilesystem type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCephFilesystemInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephFilesystems(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephFilesystems(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephFilesystem{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephFilesystemInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephFilesystemInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephFilesystemInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephFilesystem{}, f.defaultInformer) -} - -func (f *cephFilesystemInformer) Lister() v1.CephFilesystemLister { - return v1.NewCephFilesystemLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystemmirror.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystemmirror.go deleted file mode 100644 index b697fa68e..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystemmirror.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephFilesystemMirrorInformer provides access to a shared informer and lister for -// CephFilesystemMirrors. -type CephFilesystemMirrorInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephFilesystemMirrorLister -} - -type cephFilesystemMirrorInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephFilesystemMirrorInformer constructs a new informer for CephFilesystemMirror type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephFilesystemMirrorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephFilesystemMirrorInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephFilesystemMirrorInformer constructs a new informer for CephFilesystemMirror type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredCephFilesystemMirrorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephFilesystemMirrors(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephFilesystemMirrors(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephFilesystemMirror{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephFilesystemMirrorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephFilesystemMirrorInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephFilesystemMirrorInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephFilesystemMirror{}, f.defaultInformer) -} - -func (f *cephFilesystemMirrorInformer) Lister() v1.CephFilesystemMirrorLister { - return v1.NewCephFilesystemMirrorLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephnfs.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephnfs.go deleted file mode 100644 index a4f0dc1dd..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephnfs.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephNFSInformer provides access to a shared informer and lister for -// CephNFSes. -type CephNFSInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephNFSLister -} - -type cephNFSInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephNFSInformer constructs a new informer for CephNFS type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewCephNFSInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephNFSInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephNFSInformer constructs a new informer for CephNFS type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCephNFSInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephNFSes(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephNFSes(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephNFS{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephNFSInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephNFSInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephNFSInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephNFS{}, f.defaultInformer) -} - -func (f *cephNFSInformer) Lister() v1.CephNFSLister { - return v1.NewCephNFSLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectrealm.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectrealm.go deleted file mode 100644 index 9bea46d67..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectrealm.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephObjectRealmInformer provides access to a shared informer and lister for -// CephObjectRealms. 
-type CephObjectRealmInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephObjectRealmLister -} - -type cephObjectRealmInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephObjectRealmInformer constructs a new informer for CephObjectRealm type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephObjectRealmInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephObjectRealmInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephObjectRealmInformer constructs a new informer for CephObjectRealm type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCephObjectRealmInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectRealms(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectRealms(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephObjectRealm{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephObjectRealmInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephObjectRealmInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephObjectRealmInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephObjectRealm{}, f.defaultInformer) -} - -func (f *cephObjectRealmInformer) Lister() v1.CephObjectRealmLister { - return v1.NewCephObjectRealmLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectstore.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectstore.go deleted file mode 100644 index 4e80b1f9f..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectstore.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephObjectStoreInformer provides access to a shared informer and lister for -// CephObjectStores. -type CephObjectStoreInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephObjectStoreLister -} - -type cephObjectStoreInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephObjectStoreInformer constructs a new informer for CephObjectStore type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephObjectStoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephObjectStoreInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephObjectStoreInformer constructs a new informer for CephObjectStore type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCephObjectStoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectStores(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectStores(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephObjectStore{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephObjectStoreInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephObjectStoreInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephObjectStoreInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephObjectStore{}, f.defaultInformer) -} - -func (f *cephObjectStoreInformer) Lister() v1.CephObjectStoreLister { - return v1.NewCephObjectStoreLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectstoreuser.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectstoreuser.go deleted file mode 100644 index 9270c21bb..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectstoreuser.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephObjectStoreUserInformer provides access to a shared informer and lister for -// CephObjectStoreUsers. -type CephObjectStoreUserInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephObjectStoreUserLister -} - -type cephObjectStoreUserInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephObjectStoreUserInformer constructs a new informer for CephObjectStoreUser type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephObjectStoreUserInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephObjectStoreUserInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephObjectStoreUserInformer constructs a new informer for CephObjectStoreUser type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredCephObjectStoreUserInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectStoreUsers(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectStoreUsers(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephObjectStoreUser{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephObjectStoreUserInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephObjectStoreUserInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephObjectStoreUserInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephObjectStoreUser{}, f.defaultInformer) -} - -func (f *cephObjectStoreUserInformer) Lister() v1.CephObjectStoreUserLister { - return v1.NewCephObjectStoreUserLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectzone.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectzone.go deleted file mode 100644 index 4bcb3eab3..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectzone.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephObjectZoneInformer provides access to a shared informer and lister for -// CephObjectZones. -type CephObjectZoneInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephObjectZoneLister -} - -type cephObjectZoneInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephObjectZoneInformer constructs a new informer for CephObjectZone type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. 
This reduces memory footprint and number of connections to the server. -func NewCephObjectZoneInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephObjectZoneInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephObjectZoneInformer constructs a new informer for CephObjectZone type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCephObjectZoneInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectZones(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectZones(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephObjectZone{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephObjectZoneInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephObjectZoneInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephObjectZoneInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephObjectZone{}, f.defaultInformer) -} - -func (f *cephObjectZoneInformer) Lister() v1.CephObjectZoneLister { - return v1.NewCephObjectZoneLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectzonegroup.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectzonegroup.go deleted file mode 100644 index 16ad27bd0..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephobjectzonegroup.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephObjectZoneGroupInformer provides access to a shared informer and lister for -// CephObjectZoneGroups. -type CephObjectZoneGroupInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephObjectZoneGroupLister -} - -type cephObjectZoneGroupInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephObjectZoneGroupInformer constructs a new informer for CephObjectZoneGroup type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephObjectZoneGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephObjectZoneGroupInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephObjectZoneGroupInformer constructs a new informer for CephObjectZoneGroup type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCephObjectZoneGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectZoneGroups(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephObjectZoneGroups(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephObjectZoneGroup{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephObjectZoneGroupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephObjectZoneGroupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephObjectZoneGroupInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephObjectZoneGroup{}, f.defaultInformer) -} - -func (f *cephObjectZoneGroupInformer) Lister() v1.CephObjectZoneGroupLister { - return v1.NewCephObjectZoneGroupLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephrbdmirror.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephrbdmirror.go deleted file mode 100644 index 00d095194..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephrbdmirror.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 
The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - "context" - time "time" - - cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// CephRBDMirrorInformer provides access to a shared informer and lister for -// CephRBDMirrors. -type CephRBDMirrorInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.CephRBDMirrorLister -} - -type cephRBDMirrorInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewCephRBDMirrorInformer constructs a new informer for CephRBDMirror type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCephRBDMirrorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCephRBDMirrorInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredCephRBDMirrorInformer constructs a new informer for CephRBDMirror type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredCephRBDMirrorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephRBDMirrors(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CephV1().CephRBDMirrors(namespace).Watch(context.TODO(), options) - }, - }, - &cephrookiov1.CephRBDMirror{}, - resyncPeriod, - indexers, - ) -} - -func (f *cephRBDMirrorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCephRBDMirrorInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cephRBDMirrorInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cephrookiov1.CephRBDMirror{}, f.defaultInformer) -} - -func (f *cephRBDMirrorInformer) Lister() v1.CephRBDMirrorLister { - return v1.NewCephRBDMirrorLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go deleted file mode 100644 index 869909158..000000000 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // CephBlockPools returns a CephBlockPoolInformer. - CephBlockPools() CephBlockPoolInformer - // CephClients returns a CephClientInformer. - CephClients() CephClientInformer - // CephClusters returns a CephClusterInformer. - CephClusters() CephClusterInformer - // CephFilesystems returns a CephFilesystemInformer. - CephFilesystems() CephFilesystemInformer - // CephFilesystemMirrors returns a CephFilesystemMirrorInformer. - CephFilesystemMirrors() CephFilesystemMirrorInformer - // CephNFSes returns a CephNFSInformer. - CephNFSes() CephNFSInformer - // CephObjectRealms returns a CephObjectRealmInformer. - CephObjectRealms() CephObjectRealmInformer - // CephObjectStores returns a CephObjectStoreInformer. - CephObjectStores() CephObjectStoreInformer - // CephObjectStoreUsers returns a CephObjectStoreUserInformer. - CephObjectStoreUsers() CephObjectStoreUserInformer - // CephObjectZones returns a CephObjectZoneInformer. 
- CephObjectZones() CephObjectZoneInformer - // CephObjectZoneGroups returns a CephObjectZoneGroupInformer. - CephObjectZoneGroups() CephObjectZoneGroupInformer - // CephRBDMirrors returns a CephRBDMirrorInformer. - CephRBDMirrors() CephRBDMirrorInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// CephBlockPools returns a CephBlockPoolInformer. -func (v *version) CephBlockPools() CephBlockPoolInformer { - return &cephBlockPoolInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephClients returns a CephClientInformer. -func (v *version) CephClients() CephClientInformer { - return &cephClientInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephClusters returns a CephClusterInformer. -func (v *version) CephClusters() CephClusterInformer { - return &cephClusterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephFilesystems returns a CephFilesystemInformer. -func (v *version) CephFilesystems() CephFilesystemInformer { - return &cephFilesystemInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephFilesystemMirrors returns a CephFilesystemMirrorInformer. -func (v *version) CephFilesystemMirrors() CephFilesystemMirrorInformer { - return &cephFilesystemMirrorInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephNFSes returns a CephNFSInformer. -func (v *version) CephNFSes() CephNFSInformer { - return &cephNFSInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephObjectRealms returns a CephObjectRealmInformer. -func (v *version) CephObjectRealms() CephObjectRealmInformer { - return &cephObjectRealmInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephObjectStores returns a CephObjectStoreInformer. -func (v *version) CephObjectStores() CephObjectStoreInformer { - return &cephObjectStoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephObjectStoreUsers returns a CephObjectStoreUserInformer. -func (v *version) CephObjectStoreUsers() CephObjectStoreUserInformer { - return &cephObjectStoreUserInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephObjectZones returns a CephObjectZoneInformer. -func (v *version) CephObjectZones() CephObjectZoneInformer { - return &cephObjectZoneInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephObjectZoneGroups returns a CephObjectZoneGroupInformer. -func (v *version) CephObjectZoneGroups() CephObjectZoneGroupInformer { - return &cephObjectZoneGroupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// CephRBDMirrors returns a CephRBDMirrorInformer. 
-func (v *version) CephRBDMirrors() CephRBDMirrorInformer { - return &cephRBDMirrorInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index ca31ecada..f75d440d0 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -23,12 +23,10 @@ import ( sync "sync" time "time" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - cassandrarookio "github.com/rook/rook/pkg/client/informers/externalversions/cassandra.rook.io" - cephrookio "github.com/rook/rook/pkg/client/informers/externalversions/ceph.rook.io" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - nfsrookio "github.com/rook/rook/pkg/client/informers/externalversions/nfs.rook.io" - rookio "github.com/rook/rook/pkg/client/informers/externalversions/rook.io" + versioned "github.com/rook/cassandra/pkg/client/clientset/versioned" + cassandrarookio "github.com/rook/cassandra/pkg/client/informers/externalversions/cassandra.rook.io" + internalinterfaces "github.com/rook/cassandra/pkg/client/informers/externalversions/internalinterfaces" + rookio "github.com/rook/cassandra/pkg/client/informers/externalversions/rook.io" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -176,8 +174,6 @@ type SharedInformerFactory interface { WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool Cassandra() cassandrarookio.Interface - Ceph() cephrookio.Interface - Nfs() nfsrookio.Interface Rook() rookio.Interface } @@ -185,14 +181,6 @@ func (f *sharedInformerFactory) Cassandra() cassandrarookio.Interface { return cassandrarookio.New(f, f.namespace, f.tweakListOptions) } -func (f *sharedInformerFactory) Ceph() cephrookio.Interface { - return cephrookio.New(f, f.namespace, f.tweakListOptions) -} - -func (f *sharedInformerFactory) Nfs() nfsrookio.Interface { - return nfsrookio.New(f, f.namespace, f.tweakListOptions) -} - func (f *sharedInformerFactory) Rook() rookio.Interface { return rookio.New(f, f.namespace, f.tweakListOptions) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 98a981182..893652022 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -21,10 +21,8 @@ package externalversions import ( "fmt" - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - nfsrookiov1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - v1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" + v1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + v1alpha2 "github.com/rook/cassandra/pkg/apis/rook.io/v1alpha2" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -59,36 +57,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1alpha1.SchemeGroupVersion.WithResource("clusters"): return &genericInformer{resource: resource.GroupResource(), informer: f.Cassandra().V1alpha1().Clusters().Informer()}, nil - // Group=ceph.rook.io, Version=v1 - case v1.SchemeGroupVersion.WithResource("cephblockpools"): - return &genericInformer{resource: resource.GroupResource(), informer: 
f.Ceph().V1().CephBlockPools().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephclients"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephClients().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephclusters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephClusters().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephfilesystems"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephFilesystems().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephfilesystemmirrors"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephFilesystemMirrors().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephnfses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephNFSes().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephobjectrealms"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephObjectRealms().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephobjectstores"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephObjectStores().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephobjectstoreusers"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephObjectStoreUsers().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephobjectzones"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephObjectZones().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephobjectzonegroups"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephObjectZoneGroups().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("cephrbdmirrors"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephRBDMirrors().Informer()}, nil - - // Group=nfs.rook.io, Version=v1alpha1 - case nfsrookiov1alpha1.SchemeGroupVersion.WithResource("nfsservers"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Nfs().V1alpha1().NFSServers().Informer()}, nil - // Group=rook.io, Version=v1alpha2 case v1alpha2.SchemeGroupVersion.WithResource("volumes"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rook().V1alpha2().Volumes().Informer()}, nil diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 3ea7f0171..29129171e 100644 --- a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -21,7 +21,7 @@ package internalinterfaces import ( time "time" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" + versioned "github.com/rook/cassandra/pkg/client/clientset/versioned" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" diff --git a/pkg/client/informers/externalversions/nfs.rook.io/interface.go b/pkg/client/informers/externalversions/nfs.rook.io/interface.go deleted file mode 100644 index 1e9c18384..000000000 --- a/pkg/client/informers/externalversions/nfs.rook.io/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package nfs - -import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/rook/rook/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/interface.go b/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/interface.go deleted file mode 100644 index c0687a846..000000000 --- a/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // NFSServers returns a NFSServerInformer. - NFSServers() NFSServerInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// NFSServers returns a NFSServerInformer. 
-func (v *version) NFSServers() NFSServerInformer { - return &nFSServerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/nfsserver.go b/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/nfsserver.go deleted file mode 100644 index d474dd54a..000000000 --- a/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/nfsserver.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - nfsrookiov1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/rook/rook/pkg/client/listers/nfs.rook.io/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// NFSServerInformer provides access to a shared informer and lister for -// NFSServers. -type NFSServerInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.NFSServerLister -} - -type nFSServerInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewNFSServerInformer constructs a new informer for NFSServer type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewNFSServerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredNFSServerInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredNFSServerInformer constructs a new informer for NFSServer type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredNFSServerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NfsV1alpha1().NFSServers(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NfsV1alpha1().NFSServers(namespace).Watch(context.TODO(), options) - }, - }, - &nfsrookiov1alpha1.NFSServer{}, - resyncPeriod, - indexers, - ) -} - -func (f *nFSServerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredNFSServerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *nFSServerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nfsrookiov1alpha1.NFSServer{}, f.defaultInformer) -} - -func (f *nFSServerInformer) Lister() v1alpha1.NFSServerLister { - return v1alpha1.NewNFSServerLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/rook.io/interface.go b/pkg/client/informers/externalversions/rook.io/interface.go index ed8268684..6d7a6ca09 100644 --- a/pkg/client/informers/externalversions/rook.io/interface.go +++ b/pkg/client/informers/externalversions/rook.io/interface.go @@ -19,8 +19,8 @@ limitations under the License. package rook import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1alpha2 "github.com/rook/rook/pkg/client/informers/externalversions/rook.io/v1alpha2" + internalinterfaces "github.com/rook/cassandra/pkg/client/informers/externalversions/internalinterfaces" + v1alpha2 "github.com/rook/cassandra/pkg/client/informers/externalversions/rook.io/v1alpha2" ) // Interface provides access to each of this group's versions. diff --git a/pkg/client/informers/externalversions/rook.io/v1alpha2/interface.go b/pkg/client/informers/externalversions/rook.io/v1alpha2/interface.go index 5c5a58cf1..21cfdce06 100644 --- a/pkg/client/informers/externalversions/rook.io/v1alpha2/interface.go +++ b/pkg/client/informers/externalversions/rook.io/v1alpha2/interface.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha2 import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" + internalinterfaces "github.com/rook/cassandra/pkg/client/informers/externalversions/internalinterfaces" ) // Interface provides access to all the informers in this group version. 
diff --git a/pkg/client/informers/externalversions/rook.io/v1alpha2/volume.go b/pkg/client/informers/externalversions/rook.io/v1alpha2/volume.go index 079aed67d..f617329d6 100644 --- a/pkg/client/informers/externalversions/rook.io/v1alpha2/volume.go +++ b/pkg/client/informers/externalversions/rook.io/v1alpha2/volume.go @@ -22,10 +22,10 @@ import ( "context" time "time" - rookiov1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1alpha2 "github.com/rook/rook/pkg/client/listers/rook.io/v1alpha2" + rookiov1alpha2 "github.com/rook/cassandra/pkg/apis/rook.io/v1alpha2" + versioned "github.com/rook/cassandra/pkg/client/clientset/versioned" + internalinterfaces "github.com/rook/cassandra/pkg/client/informers/externalversions/internalinterfaces" + v1alpha2 "github.com/rook/cassandra/pkg/client/listers/rook.io/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/client/listers/cassandra.rook.io/v1alpha1/cluster.go b/pkg/client/listers/cassandra.rook.io/v1alpha1/cluster.go index da83fc9a5..f0c295bd7 100644 --- a/pkg/client/listers/cassandra.rook.io/v1alpha1/cluster.go +++ b/pkg/client/listers/cassandra.rook.io/v1alpha1/cluster.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" + v1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/client/listers/ceph.rook.io/v1/cephblockpool.go b/pkg/client/listers/ceph.rook.io/v1/cephblockpool.go deleted file mode 100644 index d7abf2034..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephblockpool.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephBlockPoolLister helps list CephBlockPools. -// All objects returned here must be treated as read-only. -type CephBlockPoolLister interface { - // List lists all CephBlockPools in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephBlockPool, err error) - // CephBlockPools returns an object that can list and get CephBlockPools. - CephBlockPools(namespace string) CephBlockPoolNamespaceLister - CephBlockPoolListerExpansion -} - -// cephBlockPoolLister implements the CephBlockPoolLister interface. -type cephBlockPoolLister struct { - indexer cache.Indexer -} - -// NewCephBlockPoolLister returns a new CephBlockPoolLister. 
-func NewCephBlockPoolLister(indexer cache.Indexer) CephBlockPoolLister { - return &cephBlockPoolLister{indexer: indexer} -} - -// List lists all CephBlockPools in the indexer. -func (s *cephBlockPoolLister) List(selector labels.Selector) (ret []*v1.CephBlockPool, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephBlockPool)) - }) - return ret, err -} - -// CephBlockPools returns an object that can list and get CephBlockPools. -func (s *cephBlockPoolLister) CephBlockPools(namespace string) CephBlockPoolNamespaceLister { - return cephBlockPoolNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephBlockPoolNamespaceLister helps list and get CephBlockPools. -// All objects returned here must be treated as read-only. -type CephBlockPoolNamespaceLister interface { - // List lists all CephBlockPools in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephBlockPool, err error) - // Get retrieves the CephBlockPool from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephBlockPool, error) - CephBlockPoolNamespaceListerExpansion -} - -// cephBlockPoolNamespaceLister implements the CephBlockPoolNamespaceLister -// interface. -type cephBlockPoolNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephBlockPools in the indexer for a given namespace. -func (s cephBlockPoolNamespaceLister) List(selector labels.Selector) (ret []*v1.CephBlockPool, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephBlockPool)) - }) - return ret, err -} - -// Get retrieves the CephBlockPool from the indexer for a given namespace and name. -func (s cephBlockPoolNamespaceLister) Get(name string) (*v1.CephBlockPool, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephblockpool"), name) - } - return obj.(*v1.CephBlockPool), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephclient.go b/pkg/client/listers/ceph.rook.io/v1/cephclient.go deleted file mode 100644 index c867d349d..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephclient.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephClientLister helps list CephClients. -// All objects returned here must be treated as read-only. -type CephClientLister interface { - // List lists all CephClients in the indexer. - // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.CephClient, err error) - // CephClients returns an object that can list and get CephClients. - CephClients(namespace string) CephClientNamespaceLister - CephClientListerExpansion -} - -// cephClientLister implements the CephClientLister interface. -type cephClientLister struct { - indexer cache.Indexer -} - -// NewCephClientLister returns a new CephClientLister. -func NewCephClientLister(indexer cache.Indexer) CephClientLister { - return &cephClientLister{indexer: indexer} -} - -// List lists all CephClients in the indexer. -func (s *cephClientLister) List(selector labels.Selector) (ret []*v1.CephClient, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephClient)) - }) - return ret, err -} - -// CephClients returns an object that can list and get CephClients. -func (s *cephClientLister) CephClients(namespace string) CephClientNamespaceLister { - return cephClientNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephClientNamespaceLister helps list and get CephClients. -// All objects returned here must be treated as read-only. -type CephClientNamespaceLister interface { - // List lists all CephClients in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephClient, err error) - // Get retrieves the CephClient from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephClient, error) - CephClientNamespaceListerExpansion -} - -// cephClientNamespaceLister implements the CephClientNamespaceLister -// interface. -type cephClientNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephClients in the indexer for a given namespace. -func (s cephClientNamespaceLister) List(selector labels.Selector) (ret []*v1.CephClient, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephClient)) - }) - return ret, err -} - -// Get retrieves the CephClient from the indexer for a given namespace and name. -func (s cephClientNamespaceLister) Get(name string) (*v1.CephClient, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephclient"), name) - } - return obj.(*v1.CephClient), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephcluster.go b/pkg/client/listers/ceph.rook.io/v1/cephcluster.go deleted file mode 100644 index 1008cb28b..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephcluster.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. 
- -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephClusterLister helps list CephClusters. -// All objects returned here must be treated as read-only. -type CephClusterLister interface { - // List lists all CephClusters in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephCluster, err error) - // CephClusters returns an object that can list and get CephClusters. - CephClusters(namespace string) CephClusterNamespaceLister - CephClusterListerExpansion -} - -// cephClusterLister implements the CephClusterLister interface. -type cephClusterLister struct { - indexer cache.Indexer -} - -// NewCephClusterLister returns a new CephClusterLister. -func NewCephClusterLister(indexer cache.Indexer) CephClusterLister { - return &cephClusterLister{indexer: indexer} -} - -// List lists all CephClusters in the indexer. -func (s *cephClusterLister) List(selector labels.Selector) (ret []*v1.CephCluster, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephCluster)) - }) - return ret, err -} - -// CephClusters returns an object that can list and get CephClusters. -func (s *cephClusterLister) CephClusters(namespace string) CephClusterNamespaceLister { - return cephClusterNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephClusterNamespaceLister helps list and get CephClusters. -// All objects returned here must be treated as read-only. -type CephClusterNamespaceLister interface { - // List lists all CephClusters in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephCluster, err error) - // Get retrieves the CephCluster from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephCluster, error) - CephClusterNamespaceListerExpansion -} - -// cephClusterNamespaceLister implements the CephClusterNamespaceLister -// interface. -type cephClusterNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephClusters in the indexer for a given namespace. -func (s cephClusterNamespaceLister) List(selector labels.Selector) (ret []*v1.CephCluster, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephCluster)) - }) - return ret, err -} - -// Get retrieves the CephCluster from the indexer for a given namespace and name. -func (s cephClusterNamespaceLister) Get(name string) (*v1.CephCluster, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephcluster"), name) - } - return obj.(*v1.CephCluster), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephfilesystem.go b/pkg/client/listers/ceph.rook.io/v1/cephfilesystem.go deleted file mode 100644 index c581c5d5d..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephfilesystem.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephFilesystemLister helps list CephFilesystems. -// All objects returned here must be treated as read-only. -type CephFilesystemLister interface { - // List lists all CephFilesystems in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephFilesystem, err error) - // CephFilesystems returns an object that can list and get CephFilesystems. - CephFilesystems(namespace string) CephFilesystemNamespaceLister - CephFilesystemListerExpansion -} - -// cephFilesystemLister implements the CephFilesystemLister interface. -type cephFilesystemLister struct { - indexer cache.Indexer -} - -// NewCephFilesystemLister returns a new CephFilesystemLister. -func NewCephFilesystemLister(indexer cache.Indexer) CephFilesystemLister { - return &cephFilesystemLister{indexer: indexer} -} - -// List lists all CephFilesystems in the indexer. -func (s *cephFilesystemLister) List(selector labels.Selector) (ret []*v1.CephFilesystem, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephFilesystem)) - }) - return ret, err -} - -// CephFilesystems returns an object that can list and get CephFilesystems. -func (s *cephFilesystemLister) CephFilesystems(namespace string) CephFilesystemNamespaceLister { - return cephFilesystemNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephFilesystemNamespaceLister helps list and get CephFilesystems. -// All objects returned here must be treated as read-only. -type CephFilesystemNamespaceLister interface { - // List lists all CephFilesystems in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephFilesystem, err error) - // Get retrieves the CephFilesystem from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephFilesystem, error) - CephFilesystemNamespaceListerExpansion -} - -// cephFilesystemNamespaceLister implements the CephFilesystemNamespaceLister -// interface. -type cephFilesystemNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephFilesystems in the indexer for a given namespace. -func (s cephFilesystemNamespaceLister) List(selector labels.Selector) (ret []*v1.CephFilesystem, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephFilesystem)) - }) - return ret, err -} - -// Get retrieves the CephFilesystem from the indexer for a given namespace and name. 
-func (s cephFilesystemNamespaceLister) Get(name string) (*v1.CephFilesystem, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephfilesystem"), name) - } - return obj.(*v1.CephFilesystem), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephfilesystemmirror.go b/pkg/client/listers/ceph.rook.io/v1/cephfilesystemmirror.go deleted file mode 100644 index a686b27e6..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephfilesystemmirror.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephFilesystemMirrorLister helps list CephFilesystemMirrors. -// All objects returned here must be treated as read-only. -type CephFilesystemMirrorLister interface { - // List lists all CephFilesystemMirrors in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephFilesystemMirror, err error) - // CephFilesystemMirrors returns an object that can list and get CephFilesystemMirrors. - CephFilesystemMirrors(namespace string) CephFilesystemMirrorNamespaceLister - CephFilesystemMirrorListerExpansion -} - -// cephFilesystemMirrorLister implements the CephFilesystemMirrorLister interface. -type cephFilesystemMirrorLister struct { - indexer cache.Indexer -} - -// NewCephFilesystemMirrorLister returns a new CephFilesystemMirrorLister. -func NewCephFilesystemMirrorLister(indexer cache.Indexer) CephFilesystemMirrorLister { - return &cephFilesystemMirrorLister{indexer: indexer} -} - -// List lists all CephFilesystemMirrors in the indexer. -func (s *cephFilesystemMirrorLister) List(selector labels.Selector) (ret []*v1.CephFilesystemMirror, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephFilesystemMirror)) - }) - return ret, err -} - -// CephFilesystemMirrors returns an object that can list and get CephFilesystemMirrors. -func (s *cephFilesystemMirrorLister) CephFilesystemMirrors(namespace string) CephFilesystemMirrorNamespaceLister { - return cephFilesystemMirrorNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephFilesystemMirrorNamespaceLister helps list and get CephFilesystemMirrors. -// All objects returned here must be treated as read-only. -type CephFilesystemMirrorNamespaceLister interface { - // List lists all CephFilesystemMirrors in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephFilesystemMirror, err error) - // Get retrieves the CephFilesystemMirror from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.CephFilesystemMirror, error) - CephFilesystemMirrorNamespaceListerExpansion -} - -// cephFilesystemMirrorNamespaceLister implements the CephFilesystemMirrorNamespaceLister -// interface. -type cephFilesystemMirrorNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephFilesystemMirrors in the indexer for a given namespace. -func (s cephFilesystemMirrorNamespaceLister) List(selector labels.Selector) (ret []*v1.CephFilesystemMirror, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephFilesystemMirror)) - }) - return ret, err -} - -// Get retrieves the CephFilesystemMirror from the indexer for a given namespace and name. -func (s cephFilesystemMirrorNamespaceLister) Get(name string) (*v1.CephFilesystemMirror, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephfilesystemmirror"), name) - } - return obj.(*v1.CephFilesystemMirror), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephnfs.go b/pkg/client/listers/ceph.rook.io/v1/cephnfs.go deleted file mode 100644 index 5834cdc7a..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephnfs.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephNFSLister helps list CephNFSes. -// All objects returned here must be treated as read-only. -type CephNFSLister interface { - // List lists all CephNFSes in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephNFS, err error) - // CephNFSes returns an object that can list and get CephNFSes. - CephNFSes(namespace string) CephNFSNamespaceLister - CephNFSListerExpansion -} - -// cephNFSLister implements the CephNFSLister interface. -type cephNFSLister struct { - indexer cache.Indexer -} - -// NewCephNFSLister returns a new CephNFSLister. -func NewCephNFSLister(indexer cache.Indexer) CephNFSLister { - return &cephNFSLister{indexer: indexer} -} - -// List lists all CephNFSes in the indexer. -func (s *cephNFSLister) List(selector labels.Selector) (ret []*v1.CephNFS, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephNFS)) - }) - return ret, err -} - -// CephNFSes returns an object that can list and get CephNFSes. -func (s *cephNFSLister) CephNFSes(namespace string) CephNFSNamespaceLister { - return cephNFSNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephNFSNamespaceLister helps list and get CephNFSes. -// All objects returned here must be treated as read-only. 
-type CephNFSNamespaceLister interface { - // List lists all CephNFSes in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephNFS, err error) - // Get retrieves the CephNFS from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephNFS, error) - CephNFSNamespaceListerExpansion -} - -// cephNFSNamespaceLister implements the CephNFSNamespaceLister -// interface. -type cephNFSNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephNFSes in the indexer for a given namespace. -func (s cephNFSNamespaceLister) List(selector labels.Selector) (ret []*v1.CephNFS, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephNFS)) - }) - return ret, err -} - -// Get retrieves the CephNFS from the indexer for a given namespace and name. -func (s cephNFSNamespaceLister) Get(name string) (*v1.CephNFS, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephnfs"), name) - } - return obj.(*v1.CephNFS), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephobjectrealm.go b/pkg/client/listers/ceph.rook.io/v1/cephobjectrealm.go deleted file mode 100644 index bd6cc248d..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephobjectrealm.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephObjectRealmLister helps list CephObjectRealms. -// All objects returned here must be treated as read-only. -type CephObjectRealmLister interface { - // List lists all CephObjectRealms in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectRealm, err error) - // CephObjectRealms returns an object that can list and get CephObjectRealms. - CephObjectRealms(namespace string) CephObjectRealmNamespaceLister - CephObjectRealmListerExpansion -} - -// cephObjectRealmLister implements the CephObjectRealmLister interface. -type cephObjectRealmLister struct { - indexer cache.Indexer -} - -// NewCephObjectRealmLister returns a new CephObjectRealmLister. -func NewCephObjectRealmLister(indexer cache.Indexer) CephObjectRealmLister { - return &cephObjectRealmLister{indexer: indexer} -} - -// List lists all CephObjectRealms in the indexer. 
-func (s *cephObjectRealmLister) List(selector labels.Selector) (ret []*v1.CephObjectRealm, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectRealm)) - }) - return ret, err -} - -// CephObjectRealms returns an object that can list and get CephObjectRealms. -func (s *cephObjectRealmLister) CephObjectRealms(namespace string) CephObjectRealmNamespaceLister { - return cephObjectRealmNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephObjectRealmNamespaceLister helps list and get CephObjectRealms. -// All objects returned here must be treated as read-only. -type CephObjectRealmNamespaceLister interface { - // List lists all CephObjectRealms in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectRealm, err error) - // Get retrieves the CephObjectRealm from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephObjectRealm, error) - CephObjectRealmNamespaceListerExpansion -} - -// cephObjectRealmNamespaceLister implements the CephObjectRealmNamespaceLister -// interface. -type cephObjectRealmNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephObjectRealms in the indexer for a given namespace. -func (s cephObjectRealmNamespaceLister) List(selector labels.Selector) (ret []*v1.CephObjectRealm, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectRealm)) - }) - return ret, err -} - -// Get retrieves the CephObjectRealm from the indexer for a given namespace and name. -func (s cephObjectRealmNamespaceLister) Get(name string) (*v1.CephObjectRealm, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephobjectrealm"), name) - } - return obj.(*v1.CephObjectRealm), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephobjectstore.go b/pkg/client/listers/ceph.rook.io/v1/cephobjectstore.go deleted file mode 100644 index eae500108..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephobjectstore.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephObjectStoreLister helps list CephObjectStores. -// All objects returned here must be treated as read-only. -type CephObjectStoreLister interface { - // List lists all CephObjectStores in the indexer. - // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.CephObjectStore, err error) - // CephObjectStores returns an object that can list and get CephObjectStores. - CephObjectStores(namespace string) CephObjectStoreNamespaceLister - CephObjectStoreListerExpansion -} - -// cephObjectStoreLister implements the CephObjectStoreLister interface. -type cephObjectStoreLister struct { - indexer cache.Indexer -} - -// NewCephObjectStoreLister returns a new CephObjectStoreLister. -func NewCephObjectStoreLister(indexer cache.Indexer) CephObjectStoreLister { - return &cephObjectStoreLister{indexer: indexer} -} - -// List lists all CephObjectStores in the indexer. -func (s *cephObjectStoreLister) List(selector labels.Selector) (ret []*v1.CephObjectStore, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectStore)) - }) - return ret, err -} - -// CephObjectStores returns an object that can list and get CephObjectStores. -func (s *cephObjectStoreLister) CephObjectStores(namespace string) CephObjectStoreNamespaceLister { - return cephObjectStoreNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephObjectStoreNamespaceLister helps list and get CephObjectStores. -// All objects returned here must be treated as read-only. -type CephObjectStoreNamespaceLister interface { - // List lists all CephObjectStores in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectStore, err error) - // Get retrieves the CephObjectStore from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephObjectStore, error) - CephObjectStoreNamespaceListerExpansion -} - -// cephObjectStoreNamespaceLister implements the CephObjectStoreNamespaceLister -// interface. -type cephObjectStoreNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephObjectStores in the indexer for a given namespace. -func (s cephObjectStoreNamespaceLister) List(selector labels.Selector) (ret []*v1.CephObjectStore, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectStore)) - }) - return ret, err -} - -// Get retrieves the CephObjectStore from the indexer for a given namespace and name. -func (s cephObjectStoreNamespaceLister) Get(name string) (*v1.CephObjectStore, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephobjectstore"), name) - } - return obj.(*v1.CephObjectStore), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephobjectstoreuser.go b/pkg/client/listers/ceph.rook.io/v1/cephobjectstoreuser.go deleted file mode 100644 index 7def59fa6..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephobjectstoreuser.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephObjectStoreUserLister helps list CephObjectStoreUsers. -// All objects returned here must be treated as read-only. -type CephObjectStoreUserLister interface { - // List lists all CephObjectStoreUsers in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectStoreUser, err error) - // CephObjectStoreUsers returns an object that can list and get CephObjectStoreUsers. - CephObjectStoreUsers(namespace string) CephObjectStoreUserNamespaceLister - CephObjectStoreUserListerExpansion -} - -// cephObjectStoreUserLister implements the CephObjectStoreUserLister interface. -type cephObjectStoreUserLister struct { - indexer cache.Indexer -} - -// NewCephObjectStoreUserLister returns a new CephObjectStoreUserLister. -func NewCephObjectStoreUserLister(indexer cache.Indexer) CephObjectStoreUserLister { - return &cephObjectStoreUserLister{indexer: indexer} -} - -// List lists all CephObjectStoreUsers in the indexer. -func (s *cephObjectStoreUserLister) List(selector labels.Selector) (ret []*v1.CephObjectStoreUser, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectStoreUser)) - }) - return ret, err -} - -// CephObjectStoreUsers returns an object that can list and get CephObjectStoreUsers. -func (s *cephObjectStoreUserLister) CephObjectStoreUsers(namespace string) CephObjectStoreUserNamespaceLister { - return cephObjectStoreUserNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephObjectStoreUserNamespaceLister helps list and get CephObjectStoreUsers. -// All objects returned here must be treated as read-only. -type CephObjectStoreUserNamespaceLister interface { - // List lists all CephObjectStoreUsers in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectStoreUser, err error) - // Get retrieves the CephObjectStoreUser from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephObjectStoreUser, error) - CephObjectStoreUserNamespaceListerExpansion -} - -// cephObjectStoreUserNamespaceLister implements the CephObjectStoreUserNamespaceLister -// interface. -type cephObjectStoreUserNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephObjectStoreUsers in the indexer for a given namespace. -func (s cephObjectStoreUserNamespaceLister) List(selector labels.Selector) (ret []*v1.CephObjectStoreUser, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectStoreUser)) - }) - return ret, err -} - -// Get retrieves the CephObjectStoreUser from the indexer for a given namespace and name. 
-func (s cephObjectStoreUserNamespaceLister) Get(name string) (*v1.CephObjectStoreUser, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephobjectstoreuser"), name) - } - return obj.(*v1.CephObjectStoreUser), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephobjectzone.go b/pkg/client/listers/ceph.rook.io/v1/cephobjectzone.go deleted file mode 100644 index 6f236f81e..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephobjectzone.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephObjectZoneLister helps list CephObjectZones. -// All objects returned here must be treated as read-only. -type CephObjectZoneLister interface { - // List lists all CephObjectZones in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectZone, err error) - // CephObjectZones returns an object that can list and get CephObjectZones. - CephObjectZones(namespace string) CephObjectZoneNamespaceLister - CephObjectZoneListerExpansion -} - -// cephObjectZoneLister implements the CephObjectZoneLister interface. -type cephObjectZoneLister struct { - indexer cache.Indexer -} - -// NewCephObjectZoneLister returns a new CephObjectZoneLister. -func NewCephObjectZoneLister(indexer cache.Indexer) CephObjectZoneLister { - return &cephObjectZoneLister{indexer: indexer} -} - -// List lists all CephObjectZones in the indexer. -func (s *cephObjectZoneLister) List(selector labels.Selector) (ret []*v1.CephObjectZone, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectZone)) - }) - return ret, err -} - -// CephObjectZones returns an object that can list and get CephObjectZones. -func (s *cephObjectZoneLister) CephObjectZones(namespace string) CephObjectZoneNamespaceLister { - return cephObjectZoneNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephObjectZoneNamespaceLister helps list and get CephObjectZones. -// All objects returned here must be treated as read-only. -type CephObjectZoneNamespaceLister interface { - // List lists all CephObjectZones in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectZone, err error) - // Get retrieves the CephObjectZone from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephObjectZone, error) - CephObjectZoneNamespaceListerExpansion -} - -// cephObjectZoneNamespaceLister implements the CephObjectZoneNamespaceLister -// interface. 
-type cephObjectZoneNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephObjectZones in the indexer for a given namespace. -func (s cephObjectZoneNamespaceLister) List(selector labels.Selector) (ret []*v1.CephObjectZone, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectZone)) - }) - return ret, err -} - -// Get retrieves the CephObjectZone from the indexer for a given namespace and name. -func (s cephObjectZoneNamespaceLister) Get(name string) (*v1.CephObjectZone, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephobjectzone"), name) - } - return obj.(*v1.CephObjectZone), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephobjectzonegroup.go b/pkg/client/listers/ceph.rook.io/v1/cephobjectzonegroup.go deleted file mode 100644 index 0253487f8..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephobjectzonegroup.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephObjectZoneGroupLister helps list CephObjectZoneGroups. -// All objects returned here must be treated as read-only. -type CephObjectZoneGroupLister interface { - // List lists all CephObjectZoneGroups in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectZoneGroup, err error) - // CephObjectZoneGroups returns an object that can list and get CephObjectZoneGroups. - CephObjectZoneGroups(namespace string) CephObjectZoneGroupNamespaceLister - CephObjectZoneGroupListerExpansion -} - -// cephObjectZoneGroupLister implements the CephObjectZoneGroupLister interface. -type cephObjectZoneGroupLister struct { - indexer cache.Indexer -} - -// NewCephObjectZoneGroupLister returns a new CephObjectZoneGroupLister. -func NewCephObjectZoneGroupLister(indexer cache.Indexer) CephObjectZoneGroupLister { - return &cephObjectZoneGroupLister{indexer: indexer} -} - -// List lists all CephObjectZoneGroups in the indexer. -func (s *cephObjectZoneGroupLister) List(selector labels.Selector) (ret []*v1.CephObjectZoneGroup, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectZoneGroup)) - }) - return ret, err -} - -// CephObjectZoneGroups returns an object that can list and get CephObjectZoneGroups. 
-func (s *cephObjectZoneGroupLister) CephObjectZoneGroups(namespace string) CephObjectZoneGroupNamespaceLister { - return cephObjectZoneGroupNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephObjectZoneGroupNamespaceLister helps list and get CephObjectZoneGroups. -// All objects returned here must be treated as read-only. -type CephObjectZoneGroupNamespaceLister interface { - // List lists all CephObjectZoneGroups in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephObjectZoneGroup, err error) - // Get retrieves the CephObjectZoneGroup from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephObjectZoneGroup, error) - CephObjectZoneGroupNamespaceListerExpansion -} - -// cephObjectZoneGroupNamespaceLister implements the CephObjectZoneGroupNamespaceLister -// interface. -type cephObjectZoneGroupNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephObjectZoneGroups in the indexer for a given namespace. -func (s cephObjectZoneGroupNamespaceLister) List(selector labels.Selector) (ret []*v1.CephObjectZoneGroup, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephObjectZoneGroup)) - }) - return ret, err -} - -// Get retrieves the CephObjectZoneGroup from the indexer for a given namespace and name. -func (s cephObjectZoneGroupNamespaceLister) Get(name string) (*v1.CephObjectZoneGroup, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephobjectzonegroup"), name) - } - return obj.(*v1.CephObjectZoneGroup), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/cephrbdmirror.go b/pkg/client/listers/ceph.rook.io/v1/cephrbdmirror.go deleted file mode 100644 index dc8e2d5e0..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/cephrbdmirror.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CephRBDMirrorLister helps list CephRBDMirrors. -// All objects returned here must be treated as read-only. -type CephRBDMirrorLister interface { - // List lists all CephRBDMirrors in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephRBDMirror, err error) - // CephRBDMirrors returns an object that can list and get CephRBDMirrors. - CephRBDMirrors(namespace string) CephRBDMirrorNamespaceLister - CephRBDMirrorListerExpansion -} - -// cephRBDMirrorLister implements the CephRBDMirrorLister interface. 
-type cephRBDMirrorLister struct { - indexer cache.Indexer -} - -// NewCephRBDMirrorLister returns a new CephRBDMirrorLister. -func NewCephRBDMirrorLister(indexer cache.Indexer) CephRBDMirrorLister { - return &cephRBDMirrorLister{indexer: indexer} -} - -// List lists all CephRBDMirrors in the indexer. -func (s *cephRBDMirrorLister) List(selector labels.Selector) (ret []*v1.CephRBDMirror, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephRBDMirror)) - }) - return ret, err -} - -// CephRBDMirrors returns an object that can list and get CephRBDMirrors. -func (s *cephRBDMirrorLister) CephRBDMirrors(namespace string) CephRBDMirrorNamespaceLister { - return cephRBDMirrorNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// CephRBDMirrorNamespaceLister helps list and get CephRBDMirrors. -// All objects returned here must be treated as read-only. -type CephRBDMirrorNamespaceLister interface { - // List lists all CephRBDMirrors in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CephRBDMirror, err error) - // Get retrieves the CephRBDMirror from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.CephRBDMirror, error) - CephRBDMirrorNamespaceListerExpansion -} - -// cephRBDMirrorNamespaceLister implements the CephRBDMirrorNamespaceLister -// interface. -type cephRBDMirrorNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CephRBDMirrors in the indexer for a given namespace. -func (s cephRBDMirrorNamespaceLister) List(selector labels.Selector) (ret []*v1.CephRBDMirror, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CephRBDMirror)) - }) - return ret, err -} - -// Get retrieves the CephRBDMirror from the indexer for a given namespace and name. -func (s cephRBDMirrorNamespaceLister) Get(name string) (*v1.CephRBDMirror, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cephrbdmirror"), name) - } - return obj.(*v1.CephRBDMirror), nil -} diff --git a/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go b/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go deleted file mode 100644 index 458e22da9..000000000 --- a/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -// CephBlockPoolListerExpansion allows custom methods to be added to -// CephBlockPoolLister. -type CephBlockPoolListerExpansion interface{} - -// CephBlockPoolNamespaceListerExpansion allows custom methods to be added to -// CephBlockPoolNamespaceLister. 
-type CephBlockPoolNamespaceListerExpansion interface{} - -// CephClientListerExpansion allows custom methods to be added to -// CephClientLister. -type CephClientListerExpansion interface{} - -// CephClientNamespaceListerExpansion allows custom methods to be added to -// CephClientNamespaceLister. -type CephClientNamespaceListerExpansion interface{} - -// CephClusterListerExpansion allows custom methods to be added to -// CephClusterLister. -type CephClusterListerExpansion interface{} - -// CephClusterNamespaceListerExpansion allows custom methods to be added to -// CephClusterNamespaceLister. -type CephClusterNamespaceListerExpansion interface{} - -// CephFilesystemListerExpansion allows custom methods to be added to -// CephFilesystemLister. -type CephFilesystemListerExpansion interface{} - -// CephFilesystemNamespaceListerExpansion allows custom methods to be added to -// CephFilesystemNamespaceLister. -type CephFilesystemNamespaceListerExpansion interface{} - -// CephFilesystemMirrorListerExpansion allows custom methods to be added to -// CephFilesystemMirrorLister. -type CephFilesystemMirrorListerExpansion interface{} - -// CephFilesystemMirrorNamespaceListerExpansion allows custom methods to be added to -// CephFilesystemMirrorNamespaceLister. -type CephFilesystemMirrorNamespaceListerExpansion interface{} - -// CephNFSListerExpansion allows custom methods to be added to -// CephNFSLister. -type CephNFSListerExpansion interface{} - -// CephNFSNamespaceListerExpansion allows custom methods to be added to -// CephNFSNamespaceLister. -type CephNFSNamespaceListerExpansion interface{} - -// CephObjectRealmListerExpansion allows custom methods to be added to -// CephObjectRealmLister. -type CephObjectRealmListerExpansion interface{} - -// CephObjectRealmNamespaceListerExpansion allows custom methods to be added to -// CephObjectRealmNamespaceLister. -type CephObjectRealmNamespaceListerExpansion interface{} - -// CephObjectStoreListerExpansion allows custom methods to be added to -// CephObjectStoreLister. -type CephObjectStoreListerExpansion interface{} - -// CephObjectStoreNamespaceListerExpansion allows custom methods to be added to -// CephObjectStoreNamespaceLister. -type CephObjectStoreNamespaceListerExpansion interface{} - -// CephObjectStoreUserListerExpansion allows custom methods to be added to -// CephObjectStoreUserLister. -type CephObjectStoreUserListerExpansion interface{} - -// CephObjectStoreUserNamespaceListerExpansion allows custom methods to be added to -// CephObjectStoreUserNamespaceLister. -type CephObjectStoreUserNamespaceListerExpansion interface{} - -// CephObjectZoneListerExpansion allows custom methods to be added to -// CephObjectZoneLister. -type CephObjectZoneListerExpansion interface{} - -// CephObjectZoneNamespaceListerExpansion allows custom methods to be added to -// CephObjectZoneNamespaceLister. -type CephObjectZoneNamespaceListerExpansion interface{} - -// CephObjectZoneGroupListerExpansion allows custom methods to be added to -// CephObjectZoneGroupLister. -type CephObjectZoneGroupListerExpansion interface{} - -// CephObjectZoneGroupNamespaceListerExpansion allows custom methods to be added to -// CephObjectZoneGroupNamespaceLister. -type CephObjectZoneGroupNamespaceListerExpansion interface{} - -// CephRBDMirrorListerExpansion allows custom methods to be added to -// CephRBDMirrorLister. -type CephRBDMirrorListerExpansion interface{} - -// CephRBDMirrorNamespaceListerExpansion allows custom methods to be added to -// CephRBDMirrorNamespaceLister. 
-type CephRBDMirrorNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/nfs.rook.io/v1alpha1/expansion_generated.go b/pkg/client/listers/nfs.rook.io/v1alpha1/expansion_generated.go deleted file mode 100644 index b89229e62..000000000 --- a/pkg/client/listers/nfs.rook.io/v1alpha1/expansion_generated.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// NFSServerListerExpansion allows custom methods to be added to -// NFSServerLister. -type NFSServerListerExpansion interface{} - -// NFSServerNamespaceListerExpansion allows custom methods to be added to -// NFSServerNamespaceLister. -type NFSServerNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/nfs.rook.io/v1alpha1/nfsserver.go b/pkg/client/listers/nfs.rook.io/v1alpha1/nfsserver.go deleted file mode 100644 index f26f51d09..000000000 --- a/pkg/client/listers/nfs.rook.io/v1alpha1/nfsserver.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// NFSServerLister helps list NFSServers. -// All objects returned here must be treated as read-only. -type NFSServerLister interface { - // List lists all NFSServers in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.NFSServer, err error) - // NFSServers returns an object that can list and get NFSServers. - NFSServers(namespace string) NFSServerNamespaceLister - NFSServerListerExpansion -} - -// nFSServerLister implements the NFSServerLister interface. -type nFSServerLister struct { - indexer cache.Indexer -} - -// NewNFSServerLister returns a new NFSServerLister. -func NewNFSServerLister(indexer cache.Indexer) NFSServerLister { - return &nFSServerLister{indexer: indexer} -} - -// List lists all NFSServers in the indexer. -func (s *nFSServerLister) List(selector labels.Selector) (ret []*v1alpha1.NFSServer, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.NFSServer)) - }) - return ret, err -} - -// NFSServers returns an object that can list and get NFSServers. 
-func (s *nFSServerLister) NFSServers(namespace string) NFSServerNamespaceLister { - return nFSServerNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// NFSServerNamespaceLister helps list and get NFSServers. -// All objects returned here must be treated as read-only. -type NFSServerNamespaceLister interface { - // List lists all NFSServers in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.NFSServer, err error) - // Get retrieves the NFSServer from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.NFSServer, error) - NFSServerNamespaceListerExpansion -} - -// nFSServerNamespaceLister implements the NFSServerNamespaceLister -// interface. -type nFSServerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NFSServers in the indexer for a given namespace. -func (s nFSServerNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.NFSServer, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.NFSServer)) - }) - return ret, err -} - -// Get retrieves the NFSServer from the indexer for a given namespace and name. -func (s nFSServerNamespaceLister) Get(name string) (*v1alpha1.NFSServer, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("nfsserver"), name) - } - return obj.(*v1alpha1.NFSServer), nil -} diff --git a/pkg/client/listers/rook.io/v1alpha2/volume.go b/pkg/client/listers/rook.io/v1alpha2/volume.go index fd799c636..eda7f07ed 100644 --- a/pkg/client/listers/rook.io/v1alpha2/volume.go +++ b/pkg/client/listers/rook.io/v1alpha2/volume.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1alpha2 import ( - v1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" + v1alpha2 "github.com/rook/cassandra/pkg/apis/rook.io/v1alpha2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/clusterd/context.go b/pkg/clusterd/context.go index 3a9599abb..fde50ab53 100644 --- a/pkg/clusterd/context.go +++ b/pkg/clusterd/context.go @@ -19,9 +19,9 @@ package clusterd import ( "github.com/coreos/pkg/capnslog" netclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned" - "github.com/rook/rook/pkg/util/exec" - "github.com/rook/rook/pkg/util/sys" + rookclient "github.com/rook/cassandra/pkg/client/clientset/versioned" + "github.com/rook/cassandra/pkg/util/exec" + "github.com/rook/cassandra/pkg/util/sys" "github.com/tevino/abool" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/dynamic" @@ -66,9 +66,6 @@ type Context struct { // The full path to a config file that can be used to override generated settings ConfigFileOverride string - // Information about the network for this machine and its cluster - NetworkInfo NetworkInfo - // NetworkClient is a connection to the CNI plugin API NetworkClient netclient.K8sCniCncfIoV1Interface diff --git a/pkg/clusterd/disk.go b/pkg/clusterd/disk.go index 264c6997c..0e29a69bd 100644 --- a/pkg/clusterd/disk.go +++ b/pkg/clusterd/disk.go @@ -25,12 +25,12 @@ import ( "github.com/coreos/pkg/capnslog" - "github.com/rook/rook/pkg/util/exec" - "github.com/rook/rook/pkg/util/sys" + "github.com/rook/cassandra/pkg/util/exec" + "github.com/rook/cassandra/pkg/util/sys" ) var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "inventory") + logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "inventory") isRBD = regexp.MustCompile("^rbd[0-9]+p?[0-9]{0,}$") ) diff --git a/pkg/clusterd/disk_test.go b/pkg/clusterd/disk_test.go index c1ec2e404..af9e6283d 100644 --- a/pkg/clusterd/disk_test.go +++ b/pkg/clusterd/disk_test.go @@ -18,7 +18,7 @@ package clusterd import ( "testing" - exectest "github.com/rook/rook/pkg/util/exec/test" + exectest "github.com/rook/cassandra/pkg/util/exec/test" "github.com/stretchr/testify/assert" ) diff --git a/pkg/clusterd/network.go b/pkg/clusterd/network.go deleted file mode 100644 index 3efdf80de..000000000 --- a/pkg/clusterd/network.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package clusterd - -import ( - "fmt" - "net" -) - -type NetworkInfo struct { - PublicAddr string - ClusterAddr string - PublicNetwork string // public network and subnet mask in CIDR notation - ClusterNetwork string // cluster network and subnet mask in CIDR notation - - // deprecated ipv4 format address - // TODO: remove these legacy fields in the future - PublicAddrIPv4 string - ClusterAddrIPv4 string -} - -// Simplify adapts deprecated fields -// TODO: remove this function in the future -func (in NetworkInfo) Simplify() NetworkInfo { - out := NetworkInfo{ - PublicNetwork: in.PublicNetwork, - ClusterNetwork: in.ClusterNetwork, - } - if in.PublicAddr != "" { - out.PublicAddr = in.PublicAddr - } else { - out.PublicAddr = in.PublicAddrIPv4 - } - - if in.ClusterAddr != "" { - out.ClusterAddr = in.ClusterAddr - } else { - out.ClusterAddr = in.ClusterAddrIPv4 - } - return out -} - -func VerifyNetworkInfo(networkInfo NetworkInfo) error { - if err := verifyIPAddr(networkInfo.PublicAddr); err != nil { - return err - } - - if err := verifyIPAddr(networkInfo.ClusterAddr); err != nil { - return err - } - - if err := verifyIPNetwork(networkInfo.PublicNetwork); err != nil { - return err - } - - if err := verifyIPNetwork(networkInfo.ClusterNetwork); err != nil { - return err - } - - return nil -} - -func verifyIPAddr(addr string) error { - if addr == "" { - // empty strings are OK - return nil - } - - if net.ParseIP(addr) == nil { - return fmt.Errorf("failed to parse IP address %s", addr) - } - - return nil -} - -func verifyIPNetwork(network string) error { - if network == "" { - // empty strings are OK - return nil - } - - _, _, err := net.ParseCIDR(network) - return err -} diff --git a/pkg/clusterd/network_test.go b/pkg/clusterd/network_test.go deleted file mode 100644 index 9a2433faa..000000000 --- a/pkg/clusterd/network_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package clusterd - -import "testing" -import "github.com/stretchr/testify/assert" - -func TestVerifyNetworkInfo(t *testing.T) { - // empty network info is OK - var networkInfo NetworkInfo - err := VerifyNetworkInfo(networkInfo) - assert.Nil(t, err) - - // well formed network info is OK - networkInfo = NetworkInfo{ - PublicAddr: "10.1.1.1", - PublicNetwork: "10.1.1.0/24", - ClusterAddr: "10.1.2.2", - ClusterNetwork: "10.1.2.0/24", - } - err = VerifyNetworkInfo(networkInfo) - assert.Nil(t, err) - - // malformed IP address is not OK - networkInfo = NetworkInfo{ - PublicAddr: "10.1.1.256", - PublicNetwork: "10.1.1.0/24", - ClusterAddr: "10.1.2.256", - ClusterNetwork: "10.1.2.0/24", - } - err = VerifyNetworkInfo(networkInfo) - assert.NotNil(t, err) - - // malformed network address is not OK - networkInfo = NetworkInfo{ - PublicAddr: "10.1.1.1", - PublicNetwork: "10.1.1.0/33", - ClusterAddr: "10.1.2.2", - ClusterNetwork: "10.1.2.0/33", - } - err = VerifyNetworkInfo(networkInfo) - assert.NotNil(t, err) -} - -func TestNetworkInfoSimplify(t *testing.T) { - - out := NetworkInfo{ - PublicAddr: "10.1.1.1", - PublicNetwork: "10.1.1.0/24", - ClusterAddr: "10.1.2.2", - ClusterNetwork: "10.1.2.0/24", - } - - // only has old fields - in := NetworkInfo{ - PublicAddrIPv4: "10.1.1.1", - PublicNetwork: "10.1.1.0/24", - ClusterAddrIPv4: "10.1.2.2", - ClusterNetwork: "10.1.2.0/24", - } - assert.Equal(t, out, in.Simplify()) - - // has both new and old fields - in = NetworkInfo{ - PublicAddr: "10.1.1.1", - PublicAddrIPv4: "10.9.1.1", - PublicNetwork: "10.1.1.0/24", - ClusterAddr: "10.1.2.2", - ClusterAddrIPv4: "10.9.2.2", - ClusterNetwork: "10.1.2.0/24", - } - assert.Equal(t, out, in.Simplify()) - -} diff --git a/pkg/daemon/ceph/agent/agent.go b/pkg/daemon/ceph/agent/agent.go deleted file mode 100644 index 8eb27a064..000000000 --- a/pkg/daemon/ceph/agent/agent.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package agent to manage Kubernetes storage attach events. 
-package agent - -import ( - "net/rpc" - "os" - "os/signal" - "syscall" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/cluster" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/manager/ceph" - "github.com/rook/rook/pkg/operator/ceph/agent" - v1 "k8s.io/api/core/v1" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "rook-ceph-agent") - -// Agent represent all the references needed to manage a Rook agent -type Agent struct { - context *clusterd.Context -} - -// New creates an Agent instance -func New(context *clusterd.Context) *Agent { - return &Agent{context: context} -} - -// Run the agent -func (a *Agent) Run() error { - volumeAttachmentController, err := attachment.New(a.context) - if err != nil { - return errors.Wrap(err, "failed to create volume attachment controller") - } - - volumeManager, err := ceph.NewVolumeManager(a.context) - if err != nil { - return errors.Wrap(err, "failed to create volume manager") - } - - mountSecurityMode := os.Getenv(agent.AgentMountSecurityModeEnv) - // Don't check if it is not empty because the operator always sets it on the DaemonSet - // meaning if it is not set, there is something wrong thus return an error. - if mountSecurityMode == "" { - return errors.New("no mount security mode env var found on the agent, have you upgraded your Rook operator correctly?") - } - - flexvolumeController := flexvolume.NewController(a.context, volumeAttachmentController, volumeManager, mountSecurityMode) - - flexvolumeServer := flexvolume.NewFlexvolumeServer( - a.context, - flexvolumeController, - ) - - err = rpc.Register(flexvolumeController) - if err != nil { - return errors.Wrap(err, "unable to register rpc") - } - - driverName, err := flexvolume.RookDriverName(a.context) - if err != nil { - return errors.Wrap(err, "failed to get driver name") - } - - flexDriverVendors := []string{flexvolume.FlexvolumeVendor, flexvolume.FlexvolumeVendorLegacy} - for i, vendor := range flexDriverVendors { - if i > 0 { - // Wait before the next driver is registered. In 1.11 and newer there is a timing issue if flex drivers are registered too quickly. - // See https://github.com/rook/rook/issues/1501 and https://github.com/kubernetes/kubernetes/issues/60694 - time.Sleep(time.Second) - } - - err = flexvolumeServer.Start(vendor, driverName) - if err != nil { - return errors.Wrapf(err, "failed to start flex volume server %s/%s", vendor, driverName) - } - - // Wait before the next driver is registered - time.Sleep(time.Second) - - // Register drivers both with the name of the namespace and the name "rook" - // for the volume plugins not based on the namespace. 
- err = flexvolumeServer.Start(vendor, flexvolume.FlexDriverName) - if err != nil { - return errors.Wrapf(err, "failed to start flex volume server %s/%s", vendor, flexvolume.FlexDriverName) - } - } - - // create a cluster controller and tell it to start watching for changes to clusters - clusterController := cluster.NewClusterController( - a.context, - flexvolumeController, - volumeAttachmentController) - stopChan := make(chan struct{}) - clusterController.StartWatch(v1.NamespaceAll, stopChan) - go periodicallyRefreshFlexDrivers(driverName, stopChan) - - sigc := make(chan os.Signal, 1) - signal.Notify(sigc, syscall.SIGTERM) - - <-sigc - logger.Infof("shutdown signal received, exiting...") - flexvolumeServer.StopAll() - close(stopChan) - return nil -} - -// In 1.11 and newer there is a timing issue loading flex drivers. -// See https://github.com/rook/rook/issues/1501 and https://github.com/kubernetes/kubernetes/issues/60694 -// With this loop we constantly make sure the flex drivers are all loaded. -func periodicallyRefreshFlexDrivers(driverName string, stopCh chan struct{}) { - waitTime := 2 * time.Minute - for { - logger.Debugf("waiting %s before refreshing flex", waitTime.String()) - select { - case <-time.After(waitTime): - flexvolume.TouchFlexDrivers(flexvolume.FlexvolumeVendor, driverName) - - // increase the wait time after the first few times we refresh - // at most the delay will be 32 minutes between each refresh of the flex drivers - if waitTime < 32*time.Minute { - waitTime = waitTime * 2 - } - break - case <-stopCh: - logger.Infof("stopping flex driver refresh goroutine") - return - } - } -} diff --git a/pkg/daemon/ceph/agent/cluster/controller.go b/pkg/daemon/ceph/agent/cluster/controller.go deleted file mode 100644 index b9307db4a..000000000 --- a/pkg/daemon/ceph/agent/cluster/controller.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cluster - -import ( - "os" - "sync" - "time" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - - "github.com/rook/rook/pkg/operator/k8sutil" - "k8s.io/client-go/tools/cache" -) - -const ( - removeAttachmentRetryInterval = 2 // seconds - removeAttachmentMaxRetries = 3 -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "agent-cluster") -) - -// ClusterController monitors cluster events and reacts to clean up any affected volume attachments -type ClusterController struct { - context *clusterd.Context - volumeAttachment attachment.Attachment - flexvolumeController flexvolume.VolumeController -} - -// NewClusterController creates a new instance of a ClusterController -func NewClusterController(context *clusterd.Context, flexvolumeController flexvolume.VolumeController, - volumeAttachment attachment.Attachment) *ClusterController { - - return &ClusterController{ - context: context, - volumeAttachment: volumeAttachment, - flexvolumeController: flexvolumeController, - } -} - -// StartWatch will start the watching of cluster events by this controller -func (c *ClusterController) StartWatch(namespace string, stopCh chan struct{}) { - resourceHandlerFuncs := cache.ResourceEventHandlerFuncs{ - DeleteFunc: c.onDelete, - } - - logger.Infof("start watching cluster resources") - go k8sutil.WatchCR(opcontroller.ClusterResource, namespace, resourceHandlerFuncs, c.context.RookClientset.CephV1().RESTClient(), &cephv1.CephCluster{}, stopCh) -} - -func (c *ClusterController) onDelete(obj interface{}) { - cluster, ok := obj.(*cephv1.CephCluster) - if !ok { - return - } - cluster = cluster.DeepCopy() - - c.handleClusterDelete(cluster, removeAttachmentRetryInterval*time.Second) -} - -func (c *ClusterController) handleClusterDelete(cluster *cephv1.CephCluster, retryInterval time.Duration) { - node := os.Getenv(k8sutil.NodeNameEnvVar) - agentNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - logger.Infof("cluster in namespace %q is being deleted, agent on node %q will attempt to clean up.", cluster.Namespace, node) - - // TODO: filter this List operation by node name and cluster namespace on the server side - vols, err := c.volumeAttachment.List(agentNamespace) - if err != nil { - logger.Errorf("failed to get volume attachments for agent namespace %s. %v", agentNamespace, err) - } - - var waitGroup sync.WaitGroup - var cleanupList []string - - // find volume attachments in the deleted cluster that are attached to this node - for _, vol := range vols.Items { - for _, a := range vol.Attachments { - if a.Node == node && a.ClusterName == cluster.Namespace { - logger.Infof("volume %q has an attachment belonging to deleted cluster %q, will clean it up now. mountDir: %q", - vol.Name, cluster.Namespace, a.MountDir) - - // we will perform all the cleanup asynchronously later on. Right now, just add this one - // to the list and increment the wait group counter so we know up front the full list that - // we need to wait on before any of them start executing. - waitGroup.Add(1) - cleanupList = append(cleanupList, a.MountDir) - } - } - } - - for i := range cleanupList { - // start a goroutine to perform the cleanup of this volume attachment asynchronously. - // if one cleanup hangs, it will not affect the others. 
- go func(mountDir string) { - defer waitGroup.Done() - if err := c.cleanupVolumeAttachment(mountDir, retryInterval); err != nil { - logger.Errorf("failed to clean up attachment for mountDir %q. %v", mountDir, err) - } else { - logger.Infof("cleaned up attachment for mountDir %q", mountDir) - } - }(cleanupList[i]) - } - - logger.Info("waiting for all volume cleanup goroutines to complete...") - waitGroup.Wait() - logger.Info("completed waiting for all volume cleanup") -} - -func (c *ClusterController) cleanupVolumeAttachment(mountDir string, retryInterval time.Duration) error { - // first get the attachment info - attachInfo := flexvolume.AttachOptions{MountDir: mountDir} - if err := c.flexvolumeController.GetAttachInfoFromMountDir(attachInfo.MountDir, &attachInfo); err != nil { - return err - } - - // forcefully detach the volume using the attachment info - if err := c.flexvolumeController.DetachForce(attachInfo, nil); err != nil { - return err - } - - // remove this attachment from the CRD - var safeToDelete bool - retryCount := 0 - for { - safeToDelete = false - err := c.flexvolumeController.RemoveAttachmentObject(attachInfo, &safeToDelete) - if err == nil { - break - } - - // the removal of the attachment object failed. This can happen if another agent or goroutine - // was trying to remove an attachment at the same time, due to consistency guarantees in the - // Kubernetes API. Let's wait a bit and retry again. - retryCount++ - if retryCount > removeAttachmentMaxRetries { - logger.Errorf("exceeded maximum retries for removing attachment object.") - return err - } - - logger.Infof("failed to remove the attachment object for mount dir %s, will retry again in %s", - mountDir, retryInterval) - <-time.After(retryInterval) - } - - if safeToDelete { - // its safe to delete the CRD entirely, do so now - namespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - crdName := attachInfo.VolumeName - if err := c.volumeAttachment.Delete(namespace, crdName); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/daemon/ceph/agent/cluster/controller_test.go b/pkg/daemon/ceph/agent/cluster/controller_test.go deleted file mode 100644 index db0f66a24..000000000 --- a/pkg/daemon/ceph/agent/cluster/controller_test.go +++ /dev/null @@ -1,284 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cluster - -import ( - "fmt" - "os" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookv1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestClusterDeleteSingleAttachment(t *testing.T) { - - nodeName := "node09234" - clusterName := "cluster4628" - podName := "pod7620" - pvName := "pvc-1427" - rookSystemNamespace := "rook-system-03931" - - os.Setenv(k8sutil.PodNamespaceEnvVar, rookSystemNamespace) - os.Setenv(k8sutil.NodeNameEnvVar, nodeName) - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - clientset := test.New(t, 3) - context := &clusterd.Context{ - Clientset: clientset, - } - - // set up an existing volume attachment CRD that belongs to this node and the cluster we will delete later - existingVolAttachList := &rookv1alpha2.VolumeList{ - Items: []rookv1alpha2.Volume{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: pvName, - Namespace: rookSystemNamespace, - }, - Attachments: []rookv1alpha2.Attachment{ - { - Node: nodeName, - MountDir: getMockMountDir(podName, pvName), - ClusterName: clusterName, - }, - }, - }, - }, - } - - detachCalled := false - deleteAttachmentCalled := false - removeAttachmentCalled := false - - volumeAttachmentController := &attachment.MockAttachment{ - MockList: func(namespace string) (*rookv1alpha2.VolumeList, error) { - return existingVolAttachList, nil - }, - MockDelete: func(namespace, name string) error { - assert.Equal(t, rookSystemNamespace, namespace) - assert.Equal(t, pvName, name) - deleteAttachmentCalled = true - return nil - }, - } - flexvolumeController := &flexvolume.MockFlexvolumeController{ - MockGetAttachInfoFromMountDir: func(mountDir string, attachOptions *flexvolume.AttachOptions) error { - assert.Equal(t, getMockMountDir(podName, pvName), mountDir) - attachOptions.VolumeName = pvName - return nil - }, - MockDetachForce: func(detachOpts flexvolume.AttachOptions, _ *struct{} /* void reply */) error { - <-time.After(10 * time.Millisecond) // simulate the detach taking some time (even though it's a small amount) - detachCalled = true - return nil - }, - MockRemoveAttachmentObject: func(detachOpts flexvolume.AttachOptions, safeToDetach *bool) error { - removeAttachmentCalled = true - *safeToDetach = true - return nil - }, - } - - controller := NewClusterController(context, flexvolumeController, volumeAttachmentController) - - // tell the cluster controller that a cluster has been deleted. the controller will perform the cleanup - // async, but block and wait for it all to complete before returning to us, so there should be no races - // with the asserts later on. 
- clusterToDelete := &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Namespace: clusterName}} - controller.handleClusterDelete(clusterToDelete, time.Millisecond) - - // detaching, removing the attachment from the CRD, and deleting the CRD should have been called - assert.True(t, detachCalled) - assert.True(t, removeAttachmentCalled) - assert.True(t, deleteAttachmentCalled) -} - -func TestClusterDeleteAttachedToOtherNode(t *testing.T) { - - nodeName := "node314" - clusterName := "cluster6841" - podName := "pod9134" - pvName := "pvc-1489" - rookSystemNamespace := "rook-system-0084" - - os.Setenv(k8sutil.PodNamespaceEnvVar, rookSystemNamespace) - os.Setenv(k8sutil.NodeNameEnvVar, nodeName) - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - clientset := test.New(t, 3) - context := &clusterd.Context{ - Clientset: clientset, - } - - // set up an existing volume attachment CRD that belongs to another node - existingVolAttachList := &rookv1alpha2.VolumeList{ - Items: []rookv1alpha2.Volume{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: pvName, - Namespace: rookSystemNamespace, - }, - Attachments: []rookv1alpha2.Attachment{ - { - Node: "some other node", - MountDir: getMockMountDir(podName, pvName), - ClusterName: clusterName, - }, - }, - }, - }, - } - - getAttachInfoCalled := false - - volumeAttachmentController := &attachment.MockAttachment{ - MockList: func(namespace string) (*rookv1alpha2.VolumeList, error) { - return existingVolAttachList, nil - }, - } - flexvolumeController := &flexvolume.MockFlexvolumeController{ - MockGetAttachInfoFromMountDir: func(mountDir string, attachOptions *flexvolume.AttachOptions) error { - getAttachInfoCalled = true // this should not get called since it belongs to another node - return nil - }, - } - - controller := NewClusterController(context, flexvolumeController, volumeAttachmentController) - - // delete the cluster, nothing should happen - clusterToDelete := &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Namespace: clusterName}} - controller.handleClusterDelete(clusterToDelete, time.Millisecond) - - // since the volume attachment was on a different node, nothing should have been called - assert.False(t, getAttachInfoCalled) -} - -func TestClusterDeleteMultiAttachmentRace(t *testing.T) { - - nodeName := "node09234" - clusterName := "cluster4628" - podName1 := "pod7620" - podName2 := "pod216" - pvName := "pvc-1427" - rookSystemNamespace := "rook-system-03931" - - os.Setenv(k8sutil.PodNamespaceEnvVar, rookSystemNamespace) - os.Setenv(k8sutil.NodeNameEnvVar, nodeName) - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - clientset := test.New(t, 3) - context := &clusterd.Context{ - Clientset: clientset, - } - - // set up an existing volume attachment CRD that has two pods using the same underlying volume. 
- existingVolAttachList := &rookv1alpha2.VolumeList{ - Items: []rookv1alpha2.Volume{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: pvName, - Namespace: rookSystemNamespace, - }, - Attachments: []rookv1alpha2.Attachment{ - { - Node: nodeName, - MountDir: getMockMountDir(podName1, pvName), - ClusterName: clusterName, - }, - { - Node: nodeName, - MountDir: getMockMountDir(podName2, pvName), - ClusterName: clusterName, - }, - }, - }, - }, - } - - var lock sync.Mutex - - deleteCount := 0 - volumeAttachmentController := &attachment.MockAttachment{ - MockList: func(namespace string) (*rookv1alpha2.VolumeList, error) { - return existingVolAttachList, nil - }, - MockDelete: func(namespace, name string) error { - lock.Lock() - defer lock.Unlock() - deleteCount++ - return nil - }, - } - - removeCount := 0 - flexvolumeController := &flexvolume.MockFlexvolumeController{ - MockGetAttachInfoFromMountDir: func(mountDir string, attachOptions *flexvolume.AttachOptions) error { - attachOptions.VolumeName = pvName - return nil - }, - MockDetachForce: func(detachOpts flexvolume.AttachOptions, _ *struct{} /* void reply */) error { - <-time.After(10 * time.Millisecond) // simulate the detach taking some time (even though it's a small amount) - return nil - }, - MockRemoveAttachmentObject: func(detachOpts flexvolume.AttachOptions, safeToDetach *bool) error { - // Removing the attachment object is interesting from a concurrency perspective. If two callers - // are attempting to remove from an attachment from the CRD at the same time, it could fail. - // Let's simulate that outcome in this function. - lock.Lock() - defer lock.Unlock() - - removeCount++ - *safeToDetach = true - - if removeCount%2 == 0 { - // every other time, simulate a failure to remove the attachment, e.g., someone else - // updated it before we could. - return errors.New("mock error for failing to remove the volume attachment") - } - - return nil - }, - } - - // kick off the cluster deletion process - controller := NewClusterController(context, flexvolumeController, volumeAttachmentController) - clusterToDelete := &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Namespace: clusterName}} - controller.handleClusterDelete(clusterToDelete, time.Millisecond) - - // both attachments should have made it all the way through the clean up process, meaning that Delete - // (which is idempotent) should have been called twice. - assert.Equal(t, 2, deleteCount) -} - -func getMockMountDir(podName, pvName string) string { - return fmt.Sprintf("/test/pods/%s/volumes/rook.io~rook/%s", podName, pvName) -} diff --git a/pkg/daemon/ceph/agent/flexvolume/attachment/crd.go b/pkg/daemon/ceph/agent/flexvolume/attachment/crd.go deleted file mode 100644 index 3a633ab7e..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/attachment/crd.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package attachment - -import ( - "context" - - "github.com/coreos/pkg/capnslog" - rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - "github.com/rook/rook/pkg/clusterd" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "rook-ceph-agent-crd") - -// Attachment handles custom resource Volume storage operations. -type Attachment interface { - Create(volumeAttachment *rookalpha.Volume) error - Get(namespace, name string) (*rookalpha.Volume, error) - List(namespace string) (*rookalpha.VolumeList, error) - Update(volumeAttachment *rookalpha.Volume) error - Delete(namespace, name string) error -} - -// CRD is a controller to manage Volume CRD objects -type crd struct { - context *clusterd.Context -} - -// CreateController creates a new controller for volume attachment -func New(context *clusterd.Context) (Attachment, error) { - return &crd{context: context}, nil -} - -// Get queries the Volume CRD from Kubernetes -func (c *crd) Get(namespace, name string) (*rookalpha.Volume, error) { - return c.context.RookClientset.RookV1alpha2().Volumes(namespace).Get(context.TODO(), name, metav1.GetOptions{}) -} - -// List lists all the volume attachment CRD resources in the given namespace -func (c *crd) List(namespace string) (*rookalpha.VolumeList, error) { - return c.context.RookClientset.RookV1alpha2().Volumes(namespace).List(context.TODO(), metav1.ListOptions{}) -} - -// Create creates the volume attach CRD resource in Kubernetes -func (c *crd) Create(volumeAttachment *rookalpha.Volume) error { - _, err := c.context.RookClientset.RookV1alpha2().Volumes(volumeAttachment.Namespace).Create(context.TODO(), volumeAttachment, metav1.CreateOptions{}) - return err -} - -// Update updates Volume resource -func (c *crd) Update(volumeAttachment *rookalpha.Volume) error { - _, err := c.context.RookClientset.RookV1alpha2().Volumes(volumeAttachment.Namespace).Update(context.TODO(), volumeAttachment, metav1.UpdateOptions{}) - if err != nil { - logger.Errorf("failed to update Volume CRD. %v", err) - return err - } - logger.Infof("updated Volumeattach CRD %q", volumeAttachment.ObjectMeta.Name) - return nil -} - -// Delete deletes the volume attach CRD resource in Kubernetes -func (c *crd) Delete(namespace, name string) error { - return c.context.RookClientset.RookV1alpha2().Volumes(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} diff --git a/pkg/daemon/ceph/agent/flexvolume/attachment/fake.go b/pkg/daemon/ceph/agent/flexvolume/attachment/fake.go deleted file mode 100644 index bac43b9ab..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/attachment/fake.go +++ /dev/null @@ -1,63 +0,0 @@ -// /* -// Copyright 2017 The Rook Authors. All rights reserved. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at - -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// */ - -package attachment - -import ( - rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" -) - -type MockAttachment struct { - MockCreate func(volumeAttachment *rookalpha.Volume) error - MockGet func(namespace, name string) (*rookalpha.Volume, error) - MockList func(namespace string) (*rookalpha.VolumeList, error) - MockUpdate func(volumeAttachment *rookalpha.Volume) error - MockDelete func(namespace, name string) error -} - -func (m *MockAttachment) Create(volumeAttachment *rookalpha.Volume) error { - if m.MockCreate != nil { - return m.MockCreate(volumeAttachment) - } - return nil -} -func (m *MockAttachment) Get(namespace, name string) (*rookalpha.Volume, error) { - if m.MockGet != nil { - return m.MockGet(namespace, name) - } - return &rookalpha.Volume{}, nil -} - -func (m *MockAttachment) List(namespace string) (*rookalpha.VolumeList, error) { - if m.MockList != nil { - return m.MockList(namespace) - } - return &rookalpha.VolumeList{}, nil -} - -func (m *MockAttachment) Update(volumeAttachment *rookalpha.Volume) error { - if m.MockUpdate != nil { - return m.MockUpdate(volumeAttachment) - } - return nil -} - -func (m *MockAttachment) Delete(namespace, name string) error { - if m.MockDelete != nil { - return m.MockDelete(namespace, name) - } - return nil -} diff --git a/pkg/daemon/ceph/agent/flexvolume/attachment/resource.go b/pkg/daemon/ceph/agent/flexvolume/attachment/resource.go deleted file mode 100644 index 91fdc88e6..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/attachment/resource.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package attachment to manage Kubernetes storage attach events. -package attachment - -import ( - "reflect" - - rook "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - "github.com/rook/rook/pkg/operator/k8sutil" -) - -const ( - CustomResourceName = "volume" - CustomResourceNamePlural = "volumes" -) - -// VolumeResource represents the Volume custom resource object -var VolumeResource = k8sutil.CustomResource{ - Name: CustomResourceName, - Plural: CustomResourceNamePlural, - Group: rook.CustomResourceGroupName, - Version: rookalpha.Version, - Kind: reflect.TypeOf(rookalpha.Volume{}).Name(), -} diff --git a/pkg/daemon/ceph/agent/flexvolume/controller.go b/pkg/daemon/ceph/agent/flexvolume/controller.go deleted file mode 100644 index a6eab2692..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/controller.go +++ /dev/null @@ -1,496 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package flexvolume to manage Kubernetes storage attach events. -package flexvolume - -import ( - "context" - "os" - "path" - "path/filepath" - "strings" - - "github.com/rook/rook/pkg/util/display" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - "github.com/rook/rook/pkg/operator/ceph/agent" - "github.com/rook/rook/pkg/operator/ceph/cluster" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/types" -) - -const ( - // ClusterNamespaceKey key for cluster namespace option. - ClusterNamespaceKey = "clusterNamespace" - // ClusterNameKey key for cluster name option (deprecated). - ClusterNameKey = "clusterName" - // StorageClassKey key for storage class name option. - StorageClassKey = "storageClass" - // PoolKey key for pool name option. - PoolKey = "pool" - // BlockPoolKey key for blockPool name option. - BlockPoolKey = "blockPool" - // PoolKey key for image name option. - ImageKey = "image" - // PoolKey key for data pool name option. - DataBlockPoolKey = "dataBlockPool" - kubeletDefaultRootDir = "/var/lib/kubelet" -) - -var driverLogger = capnslog.NewPackageLogger("github.com/rook/rook", "flexdriver") - -// Controller handles all events from the Flexvolume driver -type Controller struct { - context *clusterd.Context - volumeManager VolumeManager - volumeAttachment attachment.Attachment - mountSecurityMode string -} - -// ClientAccessInfo hols info for Ceph access -type ClientAccessInfo struct { - MonAddresses []string `json:"monAddresses"` - UserName string `json:"userName"` - SecretKey string `json:"secretKey"` -} - -// NewController create a new controller to handle events from the flexvolume driver -func NewController(context *clusterd.Context, volumeAttachment attachment.Attachment, manager VolumeManager, mountSecurityMode string) *Controller { - return &Controller{ - context: context, - volumeAttachment: volumeAttachment, - volumeManager: manager, - mountSecurityMode: mountSecurityMode, - } -} - -// Attach attaches rook volume to the node -func (c *Controller) Attach(attachOpts AttachOptions, devicePath *string) error { - ctx := context.TODO() - namespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - node := os.Getenv(k8sutil.NodeNameEnvVar) - - // Name of CRD is the PV name. This is done so that the CRD can be use for fencing - crdName := attachOpts.VolumeName - - // Check if this volume has been attached - volumeattachObj, err := c.volumeAttachment.Get(namespace, crdName) - if err != nil { - if !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get volume CRD %s", crdName) - } - // No volumeattach CRD for this volume found. 
Create one - volumeattachObj = rookalpha.NewVolume( - crdName, - namespace, - node, - attachOpts.PodNamespace, - attachOpts.Pod, - attachOpts.ClusterNamespace, - attachOpts.MountDir, - strings.ToLower(attachOpts.RW) == ReadOnly, - ) - logger.Infof("creating Volume attach Resource %s/%s: %+v", volumeattachObj.Namespace, volumeattachObj.Name, attachOpts) - err = c.volumeAttachment.Create(volumeattachObj) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create volume CRD %s", crdName) - } - // Some other attacher beat us in this race. Kubernetes will retry again. - return errors.Errorf("failed to attach volume %s for pod %s/%s. Volume is already attached by a different pod", - crdName, attachOpts.PodNamespace, attachOpts.Pod) - } - } else { - // Volume has already been attached. - // find if the attachment object has been previously created. - // This could be in the case of a multiple attachment for ROs or - // it could be the the Volume record was created previously and - // the attach operation failed and Kubernetes retried. - found := false - for _, a := range volumeattachObj.Attachments { - if a.MountDir == attachOpts.MountDir { - found = true - } - } - - if !found { - // Check if there is already an attachment with RW. - index := getPodRWAttachmentObject(volumeattachObj) - if index != -1 { - // check if the RW attachment is orphaned. - attachment := &volumeattachObj.Attachments[index] - - logger.Infof("volume attachment record %s/%s exists for pod: %s/%s", volumeattachObj.Namespace, volumeattachObj.Name, attachment.PodNamespace, attachment.PodName) - // Note this could return the reference of the pod who is requesting the attach if this pod have the same name as the pod in the attachment record. - allowAttach := false - pod, err := c.context.Clientset.CoreV1().Pods(attachment.PodNamespace).Get(ctx, attachment.PodName, metav1.GetOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get pod CRD %s/%s", attachment.PodNamespace, attachment.PodName) - } - allowAttach = true - logger.Infof("volume attachment record %s/%s is orphaned. Updating record with new attachment information for pod %s/%s", volumeattachObj.Namespace, volumeattachObj.Name, attachOpts.PodNamespace, attachOpts.Pod) - } - if err == nil && (attachment.PodNamespace == attachOpts.PodNamespace && attachment.PodName == attachOpts.Pod && attachment.Node == node) { - allowAttach = true - logger.Infof("volume attachment record %s/%s is starting on the same node. Updating record with new attachment information for pod %s/%s", volumeattachObj.Namespace, volumeattachObj.Name, attachOpts.PodNamespace, attachOpts.Pod) - } - if allowAttach { - // Update attachment record and proceed with attaching - attachment.Node = node - attachment.MountDir = attachOpts.MountDir - attachment.PodNamespace = attachOpts.PodNamespace - attachment.PodName = attachOpts.Pod - attachment.ClusterName = attachOpts.ClusterNamespace - attachment.ReadOnly = attachOpts.RW == ReadOnly - err = c.volumeAttachment.Update(volumeattachObj) - if err != nil { - return errors.Wrapf(err, "failed to update volume CRD %s", crdName) - } - } else { - // Attachment is not orphaned. Original pod still exists. Don't attach. - return errors.Errorf("failed to attach volume %s for pod %s/%s. Volume is already attached by pod %s/%s. 
Status %+v", - crdName, attachOpts.PodNamespace, attachOpts.Pod, attachment.PodNamespace, attachment.PodName, pod.Status.Phase) - } - } else { - // No RW attachment found. Check if this is a RW attachment request. - // We only support RW once attachment. No mixing either with RO - if attachOpts.RW == "rw" && len(volumeattachObj.Attachments) > 0 { - return errors.Errorf("failed to attach volume %s for pod %s/%s. Volume is already attached by one or more pods", - crdName, attachOpts.PodNamespace, attachOpts.Pod) - } - - // Create a new attachment record and proceed with attaching - newAttach := rookalpha.Attachment{ - Node: node, - PodNamespace: attachOpts.PodNamespace, - PodName: attachOpts.Pod, - ClusterName: attachOpts.ClusterNamespace, - MountDir: attachOpts.MountDir, - ReadOnly: attachOpts.RW == ReadOnly, - } - volumeattachObj.Attachments = append(volumeattachObj.Attachments, newAttach) - err = c.volumeAttachment.Update(volumeattachObj) - if err != nil { - return errors.Wrapf(err, "failed to update volume CRD %s", crdName) - } - } - } - } - *devicePath, err = c.volumeManager.Attach(attachOpts.Image, attachOpts.BlockPool, attachOpts.MountUser, attachOpts.MountSecret, attachOpts.ClusterNamespace) - if err != nil { - return errors.Wrapf(err, "failed to attach volume %s/%s", attachOpts.BlockPool, attachOpts.Image) - } - return nil -} - -// Expand RBD image -func (c *Controller) Expand(expandArgs ExpandArgs, _ *struct{}) error { - expandOpts := expandArgs.ExpandOptions - sizeInMb := display.BToMb(expandArgs.Size) - err := c.volumeManager.Expand(expandOpts.Image, expandOpts.Pool, expandOpts.ClusterNamespace, sizeInMb) - if err != nil { - return errors.Wrapf(err, "failed to resize volume %s/%s", expandOpts.Pool, expandOpts.Image) - } - return nil -} - -// Detach detaches a rook volume to the node -func (c *Controller) Detach(detachOpts AttachOptions, _ *struct{} /* void reply */) error { - return c.doDetach(detachOpts, false /* force */) -} - -// DetachForce forces a detach on a rook volume to the node -func (c *Controller) DetachForce(detachOpts AttachOptions, _ *struct{} /* void reply */) error { - return c.doDetach(detachOpts, true /* force */) -} - -func (c *Controller) doDetach(detachOpts AttachOptions, force bool) error { - if err := c.volumeManager.Detach( - detachOpts.Image, - detachOpts.BlockPool, - detachOpts.MountUser, - detachOpts.MountSecret, - detachOpts.ClusterNamespace, - force, - ); err != nil { - return errors.Wrapf(err, "failed to detach volume %s/%s", detachOpts.BlockPool, detachOpts.Image) - } - - namespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - crdName := detachOpts.VolumeName - volumeAttach, err := c.volumeAttachment.Get(namespace, crdName) - if err != nil { - return errors.Wrapf(err, "failed to get VolumeAttachment for %s in namespace %s", crdName, namespace) - } - if len(volumeAttach.Attachments) == 0 { - logger.Infof("Deleting Volume CRD %s/%s", namespace, crdName) - return c.volumeAttachment.Delete(namespace, crdName) - } - return nil -} - -// RemoveAttachmentObject removes the attachment from the Volume CRD and returns whether the volume is safe to detach -func (c *Controller) RemoveAttachmentObject(detachOpts AttachOptions, safeToDetach *bool) error { - namespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - crdName := detachOpts.VolumeName - logger.Infof("deleting attachment for mountDir %s from Volume attach CRD %s/%s", detachOpts.MountDir, namespace, crdName) - volumeAttach, err := c.volumeAttachment.Get(namespace, crdName) - if err != nil { - return 
errors.Wrapf(err, "failed to get Volume attach CRD %s/%s", namespace, crdName) - } - node := os.Getenv(k8sutil.NodeNameEnvVar) - nodeAttachmentCount := 0 - needUpdate := false - for i, v := range volumeAttach.Attachments { - if v.Node == node { - nodeAttachmentCount++ - if v.MountDir == detachOpts.MountDir { - // Deleting slice - volumeAttach.Attachments = append(volumeAttach.Attachments[:i], volumeAttach.Attachments[i+1:]...) - needUpdate = true - } - } - } - - if needUpdate { - // only one attachment on this node, which is the one that got removed. - if nodeAttachmentCount == 1 { - *safeToDetach = true - } - return c.volumeAttachment.Update(volumeAttach) - } - return errors.Errorf("volume CRD %s found but attachment to the mountDir %s was not found", crdName, detachOpts.MountDir) -} - -// Log logs messages from the driver -func (c *Controller) Log(message LogMessage, _ *struct{} /* void reply */) error { - if message.IsError { - driverLogger.Error(message.Message) - } else { - driverLogger.Info(message.Message) - } - return nil -} - -func (c *Controller) parseClusterNamespace(storageClassName string) (string, error) { - ctx := context.TODO() - sc, err := c.context.Clientset.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) - if err != nil { - return "", err - } - clusterNamespace, ok := sc.Parameters["clusterNamespace"] - if !ok { - // Checks for older version of parameter i.e., clusterName if clusterNamespace not found - logger.Infof("clusterNamespace not specified in the storage class %s. Checking for clusterName", storageClassName) - clusterNamespace, ok = sc.Parameters["clusterName"] - if !ok { - // Defaults to rook if not found - logger.Infof("clusterNamespace not specified in the storage class %s. Defaulting to '%s'", storageClassName, cluster.DefaultClusterName) - return cluster.DefaultClusterName, nil - } - return clusterNamespace, nil - } - return clusterNamespace, nil -} - -// GetAttachInfoFromMountDir obtain pod and volume information from the mountDir. K8s does not provide -// all necessary information to detach a volume (https://github.com/kubernetes/kubernetes/issues/52590). 
-// So we are hacking a bit and by parsing it from mountDir -func (c *Controller) GetAttachInfoFromMountDir(mountDir string, attachOptions *AttachOptions) error { - ctx := context.TODO() - if attachOptions.PodID == "" { - podID, pvName, err := getPodAndPVNameFromMountDir(mountDir) - if err != nil { - return err - } - attachOptions.PodID = podID - attachOptions.VolumeName = pvName - } - - pv, err := c.context.Clientset.CoreV1().PersistentVolumes().Get(ctx, attachOptions.VolumeName, metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to get persistent volume %s", attachOptions.VolumeName) - } - - if attachOptions.PodNamespace == "" { - // pod namespace should be the same as the PVC namespace - attachOptions.PodNamespace = pv.Spec.ClaimRef.Namespace - } - - node := os.Getenv(k8sutil.NodeNameEnvVar) - if attachOptions.Pod == "" { - // Find all pods scheduled to this node - opts := metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("spec.nodeName", node).String(), - } - pods, err := c.context.Clientset.CoreV1().Pods(attachOptions.PodNamespace).List(ctx, opts) - if err != nil { - return errors.Wrapf(err, "failed to get pods in namespace %s", attachOptions.PodNamespace) - } - - pod := findPodByID(pods, types.UID(attachOptions.PodID)) - if pod != nil { - attachOptions.Pod = pod.GetName() - } - } - - if attachOptions.Image == "" { - attachOptions.Image = pv.Spec.PersistentVolumeSource.FlexVolume.Options[ImageKey] - } - if attachOptions.BlockPool == "" { - attachOptions.BlockPool = pv.Spec.PersistentVolumeSource.FlexVolume.Options[BlockPoolKey] - if attachOptions.BlockPool == "" { - // fall back to the "pool" if the "blockPool" is not set - attachOptions.BlockPool = pv.Spec.PersistentVolumeSource.FlexVolume.Options[PoolKey] - } - } - if attachOptions.StorageClass == "" { - attachOptions.StorageClass = pv.Spec.PersistentVolumeSource.FlexVolume.Options[StorageClassKey] - } - if attachOptions.MountUser == "" { - attachOptions.MountUser = "admin" - } - attachOptions.ClusterNamespace, err = c.parseClusterNamespace(attachOptions.StorageClass) - if err != nil { - return errors.Wrapf(err, "failed to parse clusterNamespace from storageClass %s", attachOptions.StorageClass) - } - return nil -} - -// GetGlobalMountPath generate the global mount path where the device path is mounted. 
-// It is based on the kubelet root dir, which defaults to /var/lib/kubelet -func (c *Controller) GetGlobalMountPath(input GlobalMountPathInput, globalMountPath *string) error { - vendor, driver, err := getFlexDriverInfo(input.DriverDir) - if err != nil { - return err - } - - *globalMountPath = path.Join(c.getKubeletRootDir(), "plugins", vendor, driver, "mounts", input.VolumeName) - return nil -} - -// GetClientAccessInfo obtains the cluster monitor endpoints, username and secret -func (c *Controller) GetClientAccessInfo(args []string, clientAccessInfo *ClientAccessInfo) error { - ctx := context.TODO() - // args: 0 ClusterNamespace, 1 PodNamespace, 2 MountUser, 3 MountSecret - clusterNamespace := args[0] - clusterInfo, _, _, err := mon.LoadClusterInfo(c.context, clusterNamespace) - if err != nil { - return errors.Wrapf(err, "failed to load cluster information from clusters namespace %s", clusterNamespace) - } - - monEndpoints := make([]string, 0, len(clusterInfo.Monitors)) - for _, monitor := range clusterInfo.Monitors { - monEndpoints = append(monEndpoints, monitor.Endpoint) - } - - clientAccessInfo.MonAddresses = monEndpoints - - podNamespace := args[1] - clientAccessInfo.UserName = args[2] - clientAccessInfo.SecretKey = args[3] - - if c.mountSecurityMode == agent.MountSecurityModeRestricted && (clientAccessInfo.UserName == "" || clientAccessInfo.SecretKey == "") { - return errors.New("no mount user and/or mount secret given") - } - - if c.mountSecurityMode == agent.MountSecurityModeAny && clientAccessInfo.UserName == "" { - clientAccessInfo.UserName = "admin" - } - - if clientAccessInfo.SecretKey != "" { - secret, err := c.context.Clientset.CoreV1().Secrets(podNamespace).Get(ctx, clientAccessInfo.SecretKey, metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "unable to get mount secret %s from pod namespace %s", clientAccessInfo.SecretKey, podNamespace) - } - if len(secret.Data) == 0 || len(secret.Data) > 1 { - return errors.Errorf("no data or more than one data (length %d) in mount secret %s in namespace %s", len(secret.Data), clientAccessInfo.SecretKey, podNamespace) - } - var secretValue string - for _, value := range secret.Data { - secretValue = string(value[:]) - break - } - clientAccessInfo.SecretKey = secretValue - } else if c.mountSecurityMode == agent.MountSecurityModeAny && clientAccessInfo.SecretKey == "" { - clientAccessInfo.SecretKey = clusterInfo.CephCred.Secret - } - - return nil -} - -// GetKernelVersion returns the kernel version of the current node. -func (c *Controller) GetKernelVersion(_ *struct{} /* no inputs */, kernelVersion *string) error { - ctx := context.TODO() - nodeName := os.Getenv(k8sutil.NodeNameEnvVar) - node, err := c.context.Clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to get kernel version from node information for node %s", nodeName) - } - *kernelVersion = node.Status.NodeInfo.KernelVersion - return nil -} - -// getKubeletRootDir queries the kubelet configuration to find the kubelet root dir. 
Defaults to /var/lib/kubelet -func (c *Controller) getKubeletRootDir() string { - // in k8s 1.8 it does not appear possible to change the default root dir - // see https://github.com/rook/rook/issues/1282 - return kubeletDefaultRootDir -} - -// getPodAndPVNameFromMountDir parses pod information from the mountDir -func getPodAndPVNameFromMountDir(mountDir string) (string, string, error) { - // mountDir is in the form of /pods//volumes/rook.io~rook/ - filepath.Clean(mountDir) - token := strings.Split(mountDir, string(filepath.Separator)) - // token length should at least size 5 - length := len(token) - if length < 5 { - return "", "", errors.Errorf("failed to parse mountDir %s for CRD name and podID", mountDir) - } - return token[length-4], token[length-1], nil -} - -func findPodByID(pods *v1.PodList, podUID types.UID) *v1.Pod { - for i := range pods.Items { - if pods.Items[i].GetUID() == podUID { - return &(pods.Items[i]) - } - } - return nil -} - -// getPodRWAttachmentObject loops through the list of attachments of the Volume -// resource and returns the index of the first RW attachment object -func getPodRWAttachmentObject(volumeAttachmentObject *rookalpha.Volume) int { - for i, a := range volumeAttachmentObject.Attachments { - if !a.ReadOnly { - return i - } - } - return -1 -} diff --git a/pkg/daemon/ceph/agent/flexvolume/controller_test.go b/pkg/daemon/ceph/agent/flexvolume/controller_test.go deleted file mode 100644 index 73104da68..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/controller_test.go +++ /dev/null @@ -1,858 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package flexvolume to manage Kubernetes storage attach events. 
-package flexvolume - -import ( - "context" - "os" - "testing" - - rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/manager" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestAttach(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - devicePath := "" - opts := AttachOptions{ - Image: "image123", - Pool: "testpool", - ClusterNamespace: "testCluster", - StorageClass: "storageclass1", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - VolumeName: "pvc-123", - Pod: "myPod", - PodNamespace: "Default", - RW: "rw", - } - att, err := attachment.New(context) - assert.Nil(t, err) - - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Attach(opts, &devicePath) - assert.Nil(t, err) - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Get(ctx, "pvc-123", metav1.GetOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - assert.Equal(t, 1, len(volumeAttachment.Attachments)) - - a := volumeAttachment.Attachments[0] - assert.Equal(t, "node1", a.Node) - assert.Equal(t, "Default", a.PodNamespace) - assert.Equal(t, "myPod", a.PodName) - assert.Equal(t, "testCluster", a.ClusterName) - assert.Equal(t, "/test/pods/pod123/volumes/rook.io~rook/pvc-123", a.MountDir) - assert.False(t, a.ReadOnly) -} - -func TestAttachAlreadyExist(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "otherpod", - Namespace: "Default", - }, - Status: v1.PodStatus{ - Phase: "running", - }, - } - _, err := clientset.CoreV1().Pods("Default").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{ - { - Node: "node1", - PodNamespace: "Default", - PodName: "otherpod", - MountDir: "/tmt/test", - ReadOnly: false, - }, - }, - } - - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - var devicePath *string - opts := AttachOptions{ - Image: "image123", - Pool: "testpool", - StorageClass: "storageclass1", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - VolumeName: "pvc-123", - Pod: 
"myPod", - PodNamespace: "Default", - RW: "rw", - } - - att, err := attachment.New(context) - assert.Nil(t, err) - - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Attach(opts, devicePath) - assert.NotNil(t, err) - assert.Equal(t, "failed to attach volume pvc-123 for pod Default/myPod. Volume is already attached by pod Default/otherpod. Status running", err.Error()) -} - -func TestAttachReadOnlyButRWAlreadyExist(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "otherpod", - Namespace: "Default", - }, - Status: v1.PodStatus{ - Phase: "running", - }, - } - _, err := clientset.CoreV1().Pods("Default").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{ - { - Node: "node1", - PodNamespace: "Default", - PodName: "otherpod", - MountDir: "/tmt/test", - ReadOnly: false, - }, - }, - } - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - var devicePath *string - opts := AttachOptions{ - Image: "image123", - Pool: "testpool", - StorageClass: "storageclass1", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - VolumeName: "pvc-123", - Pod: "myPod", - PodNamespace: "Default", - RW: "ro", - } - - att, err := attachment.New(context) - assert.Nil(t, err) - - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Attach(opts, devicePath) - assert.NotNil(t, err) - assert.Equal(t, "failed to attach volume pvc-123 for pod Default/myPod. Volume is already attached by pod Default/otherpod. 
Status running", err.Error()) -} - -func TestAttachRWButROAlreadyExist(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{ - { - Node: "node1", - PodNamespace: "Default", - PodName: "otherpod", - MountDir: "/tmt/test", - ReadOnly: true, - }, - }, - } - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - var devicePath *string - opts := AttachOptions{ - Image: "image123", - Pool: "testpool", - StorageClass: "storageclass1", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - VolumeName: "pvc-123", - Pod: "myPod", - PodNamespace: "Default", - RW: "rw", - } - att, err := attachment.New(context) - assert.Nil(t, err) - - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Attach(opts, devicePath) - assert.NotNil(t, err) - assert.Equal(t, "failed to attach volume pvc-123 for pod Default/myPod. Volume is already attached by one or more pods", err.Error()) -} - -func TestMultipleAttachReadOnly(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - opts := AttachOptions{ - Image: "image123", - Pool: "testpool", - ClusterNamespace: "testCluster", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - VolumeName: "pvc-123", - Pod: "myPod", - PodNamespace: "Default", - RW: "ro", - } - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{ - { - Node: "otherNode", - PodNamespace: "Default", - PodName: "myPod", - MountDir: "/tmt/test", - ReadOnly: true, - }, - }, - } - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - att, err := attachment.New(context) - assert.Nil(t, err) - - devicePath := "" - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Attach(opts, &devicePath) - assert.Nil(t, err) - - volAtt, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Get(ctx, "pvc-123", metav1.GetOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - assert.Equal(t, 2, len(volAtt.Attachments)) - - assert.True(t, containsAttachment( - rookalpha.Attachment{ - PodNamespace: opts.PodNamespace, - PodName: opts.Pod, - MountDir: opts.MountDir, - ReadOnly: true, - Node: "node1", - }, volAtt.Attachments, - ), "Volume crd does not contain expected attachment") - - assert.True(t, containsAttachment( - 
existingCRD.Attachments[0], volAtt.Attachments, - ), "Volume crd does not contain expected attachment") -} - -func TestOrphanAttachOriginalPodDoesntExist(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - opts := AttachOptions{ - Image: "image123", - Pool: "testpool", - ClusterNamespace: "testCluster", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - VolumeName: "pvc-123", - Pod: "newPod", - PodNamespace: "Default", - RW: "rw", - } - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{ - { - Node: "otherNode", - PodNamespace: "Default", - PodName: "oldPod", - MountDir: "/tmt/test", - ReadOnly: false, - }, - }, - } - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - att, err := attachment.New(context) - assert.Nil(t, err) - - devicePath := "" - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Attach(opts, &devicePath) - assert.Nil(t, err) - - volAtt, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Get(ctx, "pvc-123", metav1.GetOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volAtt) - assert.Equal(t, 1, len(volAtt.Attachments)) - assert.True(t, containsAttachment( - rookalpha.Attachment{ - PodNamespace: opts.PodNamespace, - PodName: opts.Pod, - MountDir: opts.MountDir, - ReadOnly: false, - Node: "node1", - }, volAtt.Attachments, - ), "Volume crd does not contain expected attachment") -} - -func TestOrphanAttachOriginalPodNameSame(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - // Setting up the pod to ensure that it is exists - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "myPod", - Namespace: "Default", - UID: "pod456", - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - } - _, err := clientset.CoreV1().Pods("Default").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - // existing record of old attachment. Pod namespace and name must much with the new attachment input to simulate that the new attachment is for the same pod - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{ - { - Node: "otherNode", - PodNamespace: "Default", - PodName: "myPod", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - ReadOnly: false, - }, - }, - } - - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - // attachment input. 
The ID of the pod must be different than the original record to simulate that - // the pod resource is a different one but for the same pod metadata. This is reflected in the MountDir. - // The namespace and name, however, must match. - opts := AttachOptions{ - Image: "image123", - Pool: "testpool", - ClusterNamespace: "testCluster", - MountDir: "/test/pods/pod456/volumes/rook.io~rook/pvc-123", - VolumeName: "pvc-123", - Pod: "myPod", - PodNamespace: "Default", - RW: "rw", - } - - att, err := attachment.New(context) - assert.Nil(t, err) - - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - // Attach should fail because the pod is on a different node - devicePath := "" - err = controller.Attach(opts, &devicePath) - assert.Error(t, err) - - // Attach should succeed and the stale volumeattachment record should be updated to reflect the new pod information - // since the pod is restarting on the same node - os.Setenv(k8sutil.NodeNameEnvVar, "otherNode") - err = controller.Attach(opts, &devicePath) - assert.NoError(t, err) - - volAtt, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Get(ctx, "pvc-123", metav1.GetOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volAtt) - assert.Equal(t, 1, len(volAtt.Attachments)) - assert.True(t, containsAttachment( - rookalpha.Attachment{ - PodNamespace: opts.PodNamespace, - PodName: opts.Pod, - MountDir: opts.MountDir, - ReadOnly: false, - Node: "otherNode", - }, volAtt.Attachments, - ), "Volume crd does not contain expected attachment") -} - -// This tests the idempotency of the Volume record. -// If the Volume record was previously created for this pod -// and the attach flow should continue. -func TestVolumeExistAttach(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - opts := AttachOptions{ - Image: "image123", - Pool: "testpool", - ClusterNamespace: "testCluster", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - VolumeName: "pvc-123", - Pod: "myPod", - PodNamespace: "Default", - RW: "rw", - } - - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{ - { - Node: "node1", - PodNamespace: "Default", - PodName: "myPod", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - ReadOnly: false, - }, - }, - } - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - att, err := attachment.New(context) - assert.Nil(t, err) - - devicePath := "" - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Attach(opts, &devicePath) - assert.Nil(t, err) - - newAttach, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Get(ctx, "pvc-123", metav1.GetOptions{}) - assert.Nil(t, err) - assert.NotNil(t, newAttach) - // TODO: Check that the volume attach was not updated (can't use ResourceVersion in the fake testing) -} - -func TestDetach(t *testing.T) { - ctx := context.TODO() - clientset := 
test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{}, - } - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - opts := AttachOptions{ - VolumeName: "pvc-123", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - } - - att, err := attachment.New(context) - assert.Nil(t, err) - - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Detach(opts, nil) - assert.Nil(t, err) - - _, err = context.RookClientset.RookV1alpha2().Volumes("rook-system").Get(ctx, "pvc-123", metav1.GetOptions{}) - assert.NotNil(t, err) - assert.True(t, errors.IsNotFound(err)) -} - -func TestDetachWithAttachmentLeft(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - existingCRD := &rookalpha.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - Namespace: "rook-system", - }, - Attachments: []rookalpha.Attachment{ - { - Node: "node1", - PodNamespace: "Default", - PodName: "myPod", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - }, - }, - } - volumeAttachment, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Create(ctx, existingCRD, metav1.CreateOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volumeAttachment) - - opts := AttachOptions{ - VolumeName: "pvc-123", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - } - - att, err := attachment.New(context) - assert.Nil(t, err) - - controller := &Controller{ - context: context, - volumeAttachment: att, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.Detach(opts, nil) - assert.Nil(t, err) - - volAttach, err := context.RookClientset.RookV1alpha2().Volumes("rook-system").Get(ctx, "pvc-123", metav1.GetOptions{}) - assert.Nil(t, err) - assert.NotNil(t, volAttach) -} - -func TestGetAttachInfoFromMountDir(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.NodeNameEnvVar, "node1") - defer os.Unsetenv(k8sutil.NodeNameEnvVar) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-123", - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - FlexVolume: &v1.FlexPersistentVolumeSource{ - Driver: "ceph.rook.io/rook", - FSType: "ext4", - ReadOnly: false, - Options: map[string]string{ - StorageClassKey: "storageClass1", - PoolKey: "pool123", - ImageKey: "pvc-123", - DataBlockPoolKey: "", - }, - }, - }, - 
ClaimRef: &v1.ObjectReference{ - Namespace: "testnamespace", - }, - }, - } - _, err := clientset.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) - assert.NoError(t, err) - - sc := storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "storageClass1", - }, - Provisioner: "ceph.rook.io/rook", - Parameters: map[string]string{"pool": "testpool", "clusterNamespace": "testCluster", "fsType": "ext3"}, - } - _, err = clientset.StorageV1().StorageClasses().Create(ctx, &sc, metav1.CreateOptions{}) - assert.NoError(t, err) - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "myPod", - Namespace: "testnamespace", - UID: "pod123", - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - } - _, err = clientset.CoreV1().Pods("testnamespace").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - opts := AttachOptions{ - VolumeName: "pvc-123", - MountDir: "/test/pods/pod123/volumes/rook.io~rook/pvc-123", - } - - controller := &Controller{ - context: context, - volumeManager: &manager.FakeVolumeManager{}, - } - - err = controller.GetAttachInfoFromMountDir(opts.MountDir, &opts) - assert.Nil(t, err) - - assert.Equal(t, "pod123", opts.PodID) - assert.Equal(t, "pvc-123", opts.VolumeName) - assert.Equal(t, "testnamespace", opts.PodNamespace) - assert.Equal(t, "myPod", opts.Pod) - assert.Equal(t, "pvc-123", opts.Image) - assert.Equal(t, "pool123", opts.BlockPool) - assert.Equal(t, "storageClass1", opts.StorageClass) - assert.Equal(t, "testCluster", opts.ClusterNamespace) -} - -func TestParseClusterNamespace(t *testing.T) { - testParseClusterNamespace(t, "clusterNamespace") -} - -func TestParseClusterName(t *testing.T) { - testParseClusterNamespace(t, "clusterName") -} - -func testParseClusterNamespace(t *testing.T, namespaceParameter string) { - ctx := context.TODO() - clientset := test.New(t, 3) - - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookclient.NewSimpleClientset(), - } - sc := storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-storageclass", - }, - Provisioner: "ceph.rook.io/rook", - Parameters: map[string]string{"pool": "testpool", namespaceParameter: "testCluster", "fsType": "ext3"}, - } - _, err := clientset.StorageV1().StorageClasses().Create(ctx, &sc, metav1.CreateOptions{}) - assert.NoError(t, err) - - volumeAttachment, err := attachment.New(context) - assert.Nil(t, err) - - fc := &Controller{ - context: context, - volumeAttachment: volumeAttachment, - } - clusterNamespace, _ := fc.parseClusterNamespace("rook-storageclass") - assert.Equal(t, "testCluster", clusterNamespace) -} - -func TestGetPodAndPVNameFromMountDir(t *testing.T) { - mountDir := "/var/lib/kubelet/pods/b8b7c55f-99ea-11e7-8994-0800277c89a7/volumes/rook.io~rook/pvc-b8aea7f4-99ea-11e7-8994-0800277c89a7" - pod, pv, err := getPodAndPVNameFromMountDir(mountDir) - assert.Nil(t, err) - assert.Equal(t, "b8b7c55f-99ea-11e7-8994-0800277c89a7", pod) - assert.Equal(t, "pvc-b8aea7f4-99ea-11e7-8994-0800277c89a7", pv) -} - -func TestGetCRDNameFromMountDirInvalid(t *testing.T) { - mountDir := "volumes/rook.io~rook/pvc-b8aea7f4-99ea-11e7-8994-0800277c89a7" - _, _, err := getPodAndPVNameFromMountDir(mountDir) - assert.NotNil(t, err) -} - -func containsAttachment(attachment rookalpha.Attachment, attachments []rookalpha.Attachment) bool { - for _, a := range attachments { - if a.PodNamespace == attachment.PodNamespace && a.PodName == attachment.PodName && a.MountDir == attachment.MountDir && a.ReadOnly == attachment.ReadOnly && a.Node == attachment.Node 
{ - return true - } - } - return false -} diff --git a/pkg/daemon/ceph/agent/flexvolume/driver.go b/pkg/daemon/ceph/agent/flexvolume/driver.go deleted file mode 100644 index 22512196b..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/driver.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flexvolume - -// Source: https://github.com/kubernetes/kubernetes/blob/v1.21.1/pkg/volume/flexvolume/driver-call.go - -const ( - // statusSuccess represents the successful completion of command. - statusSuccess = "Success" -) - -// driverStatus represents the return value of the driver callout. -type driverStatus struct { - // Status of the callout. One of "Success", "Failure" or "Not supported". - Status string `json:"status"` - // Reason for success/failure. - Message string `json:"message,omitempty"` - // Path to the device attached. This field is valid only for attach calls. - // ie: /dev/sdx - DevicePath string `json:"device,omitempty"` - // Cluster wide unique name of the volume. - VolumeName string `json:"volumeName,omitempty"` - // Represents volume is attached on the node - Attached bool `json:"attached,omitempty"` - // Returns capabilities of the driver. - // By default we assume all the capabilities are supported. - // If the plugin does not support a capability, it can return false for that capability. - Capabilities *driverCapabilities `json:",omitempty"` - // Returns the actual size of the volume after resizing is done, the size is in bytes. - ActualVolumeSize int64 `json:"volumeNewSize,omitempty"` -} - -// driverCapabilities represents what driver can do -type driverCapabilities struct { - Attach bool `json:"attach"` - SELinuxRelabel bool `json:"selinuxRelabel"` - SupportsMetrics bool `json:"supportsMetrics"` - FSGroup bool `json:"fsGroup"` - RequiresFSResize bool `json:"requiresFSResize"` -} diff --git a/pkg/daemon/ceph/agent/flexvolume/fake.go b/pkg/daemon/ceph/agent/flexvolume/fake.go deleted file mode 100644 index c817b6d4e..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/fake.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flexvolume - -type MockFlexvolumeController struct { - MockAttach func(attachOpts AttachOptions, devicePath *string) error - MockDetach func(detachOpts AttachOptions, _ *struct{} /* void reply */) error - MockDetachForce func(detachOpts AttachOptions, _ *struct{} /* void reply */) error - MockRemoveAttachmentObject func(detachOpts AttachOptions, safeToDetach *bool) error - MockLog func(message LogMessage, _ *struct{} /* void reply */) error - MockGetAttachInfoFromMountDir func(mountDir string, attachOptions *AttachOptions) error -} - -func (m *MockFlexvolumeController) Attach(attachOpts AttachOptions, devicePath *string) error { - if m.MockAttach != nil { - return m.MockAttach(attachOpts, devicePath) - } - return nil -} - -func (m *MockFlexvolumeController) Detach(detachOpts AttachOptions, _ *struct{} /* void reply */) error { - if m.MockDetach != nil { - return m.MockDetach(detachOpts, nil) - } - return nil -} - -func (m *MockFlexvolumeController) DetachForce(detachOpts AttachOptions, _ *struct{} /* void reply */) error { - if m.MockDetachForce != nil { - return m.MockDetachForce(detachOpts, nil) - } - return nil -} - -func (m *MockFlexvolumeController) RemoveAttachmentObject(detachOpts AttachOptions, safeToDetach *bool) error { - if m.MockRemoveAttachmentObject != nil { - return m.MockRemoveAttachmentObject(detachOpts, safeToDetach) - } - return nil -} - -func (m *MockFlexvolumeController) Log(message LogMessage, _ *struct{} /* void reply */) error { - if m.MockLog != nil { - return m.MockLog(message, nil) - } - return nil -} - -func (m *MockFlexvolumeController) GetAttachInfoFromMountDir(mountDir string, attachOptions *AttachOptions) error { - if m.MockGetAttachInfoFromMountDir != nil { - return m.MockGetAttachInfoFromMountDir(mountDir, attachOptions) - } - return nil -} diff --git a/pkg/daemon/ceph/agent/flexvolume/manager/ceph/manager.go b/pkg/daemon/ceph/agent/flexvolume/manager/ceph/manager.go deleted file mode 100644 index 56147e32e..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/manager/ceph/manager.go +++ /dev/null @@ -1,270 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ceph - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephutil "github.com/rook/rook/pkg/daemon/ceph/util" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/util/sys" -) - -const ( - findDevicePathMaxRetries = 10 - rbdKernelModuleName = "rbd" - keyringTemplate = ` -[client.%s] -key = %s -` -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "ceph-volumeattacher") - -// VolumeManager represents an object for perform volume attachment requests for Ceph volumes -type VolumeManager struct { - context *clusterd.Context - devicePathFinder pathFinder -} - -type devicePathFinder struct{} - -// DevicePathFinder is used to find the device path after the volume has been attached -type pathFinder interface { - FindDevicePath(image, pool, clusterNamespace string) (string, error) -} - -// NewVolumeManager create attacher for ceph volumes -func NewVolumeManager(context *clusterd.Context) (*VolumeManager, error) { - vm := &VolumeManager{ - context: context, - devicePathFinder: &devicePathFinder{}, - } - err := vm.Init() - return vm, err -} - -// Init the ceph volume manager -func (vm *VolumeManager) Init() error { - // check if the rbd is a builtin kernel module, if it is then we don't need to load it manually - in, err := sys.IsBuiltinKernelModule(rbdKernelModuleName, vm.context.Executor) - if err != nil { - return err - } - if in { - logger.Noticef("volume manager is a builtin kernel module, don't load it manually") - return nil - } - - // check to see if the rbd kernel module has single_major support - hasSingleMajor, err := sys.CheckKernelModuleParam(rbdKernelModuleName, "single_major", vm.context.Executor) - if err != nil { - logger.Noticef("failed %q single_major check, assuming it's unsupported. %v", rbdKernelModuleName, err) - hasSingleMajor = false - } - - opts := []string{} - if hasSingleMajor { - opts = append(opts, "single_major=Y") - } - - // load the rbd kernel module with options - if err := sys.LoadKernelModule(rbdKernelModuleName, opts, vm.context.Executor); err != nil { - logger.Noticef("failed to load kernel module %q. %v", rbdKernelModuleName, err) - return err - } - - return nil -} - -// Attach a ceph image to the node -func (vm *VolumeManager) Attach(image, pool, id, key, clusterNamespace string) (string, error) { - // Check if the volume is already attached - devicePath, err := vm.isAttached(image, pool, clusterNamespace) - if err != nil { - return "", errors.Wrapf(err, "failed to check if volume %s/%s is already attached", pool, image) - } - if devicePath != "" { - logger.Infof("volume %s/%s is already attached. 
The device path is %s", pool, image, devicePath) - return devicePath, nil - } - - if id == "" && key == "" { - return "", errors.New("no id nor keyring given, can't mount without credentials") - } - - // Attach and poll until volume is mapped - logger.Infof("attaching volume %s/%s cluster %s", pool, image, clusterNamespace) - monitors, keyring, err := getClusterInfo(vm.context, clusterNamespace) - defer os.Remove(keyring) - if err != nil { - return "", errors.Wrapf(err, "failed to load cluster information from cluster %s", clusterNamespace) - } - - // Write the user given key to the keyring file - if key != "" { - keyringEval := func(key string) string { - r := fmt.Sprintf(keyringTemplate, id, key) - return r - } - if err = cephclient.WriteKeyring(keyring, key, keyringEval); err != nil { - return "", errors.Wrapf(err, "failed writing custom keyring for id %s", id) - } - } - - clusterInfo := cephclient.AdminClusterInfo(clusterNamespace) - err = cephclient.MapImage(vm.context, clusterInfo, image, pool, id, keyring, monitors) - if err != nil { - return "", errors.Wrapf(err, "failed to map image %s/%s cluster %s", pool, image, clusterNamespace) - } - - // Poll for device path - retryCount := 0 - for { - devicePath, err := vm.devicePathFinder.FindDevicePath(image, pool, clusterNamespace) - if err != nil { - return "", errors.Wrapf(err, "failed to poll for mapped image %s/%s cluster %s", pool, image, clusterNamespace) - } - - if devicePath != "" { - return devicePath, nil - } - - retryCount++ - if retryCount >= findDevicePathMaxRetries { - return "", errors.Wrap(err, "exceeded retry count while finding device path") - } - - logger.Infof("failed to find device path, sleeping 1 second. %v", err) - <-time.After(time.Second) - } -} - -func (vm *VolumeManager) Expand(image, pool, clusterNamespace string, size uint64) error { - monitors, keyring, err := getClusterInfo(vm.context, clusterNamespace) - if err != nil { - return errors.Wrapf(err, "failed to resize volume %s/%s cluster %s", pool, image, clusterNamespace) - } - clusterInfo := cephclient.AdminClusterInfo(clusterNamespace) - err = cephclient.ExpandImage(vm.context, clusterInfo, image, pool, monitors, keyring, size) - if err != nil { - return errors.Wrapf(err, "failed to resize volume %s/%s cluster %s", pool, image, clusterNamespace) - } - return nil -} - -// Detach the volume -func (vm *VolumeManager) Detach(image, pool, id, key, clusterNamespace string, force bool) error { - // check if the volume is attached - devicePath, err := vm.isAttached(image, pool, clusterNamespace) - if err != nil { - return errors.Errorf("failed to check if volume %s/%s is attached cluster %s", pool, image, clusterNamespace) - } - if devicePath == "" { - logger.Infof("volume %s/%s is already detached cluster %s", pool, image, clusterNamespace) - return nil - } - - if id == "" && key == "" { - return errors.New("no id nor keyring given, can't unmount without credentials") - } - - logger.Infof("detaching volume %s/%s cluster %s", pool, image, clusterNamespace) - monitors, keyring, err := getClusterInfo(vm.context, clusterNamespace) - defer os.Remove(keyring) - if err != nil { - return errors.Wrapf(err, "failed to load cluster information from cluster %s", clusterNamespace) - } - - // Write the user given key to the keyring file - if key != "" { - keyringEval := func(key string) string { - r := fmt.Sprintf(keyringTemplate, id, key) - return r - } - - if err = cephclient.WriteKeyring(keyring, key, keyringEval); err != nil { - return errors.Wrapf(err, "failed writing 
custom keyring for id %s", id) - } - } - - clusterInfo := cephclient.AdminClusterInfo(clusterNamespace) - err = cephclient.UnMapImage(vm.context, clusterInfo, image, pool, id, keyring, monitors, force) - if err != nil { - return errors.Wrapf(err, "failed to detach volume %s/%s cluster %s", pool, image, clusterNamespace) - } - logger.Infof("detached volume %s/%s", pool, image) - return nil -} - -// Check if the volume is attached -func (vm *VolumeManager) isAttached(image, pool, clusterNamespace string) (string, error) { - devicePath, err := vm.devicePathFinder.FindDevicePath(image, pool, clusterNamespace) - if err != nil { - return "", err - } - return devicePath, nil -} - -func getClusterInfo(context *clusterd.Context, clusterNamespace string) (string, string, error) { - clusterInfo, _, _, err := mon.LoadClusterInfo(context, clusterNamespace) - if err != nil { - return "", "", errors.Wrapf(err, "failed to load cluster information from cluster %s", clusterNamespace) - } - - // create temp keyring file - keyringFile, err := ioutil.TempFile("", clusterNamespace+".keyring") - if err != nil { - return "", "", err - } - - keyring := cephclient.CephKeyring(clusterInfo.CephCred) - if err := ioutil.WriteFile(keyringFile.Name(), []byte(keyring), 0600); err != nil { - return "", "", errors.Errorf("failed to write monitor keyring to %s", keyringFile.Name()) - } - - monEndpoints := make([]string, 0, len(clusterInfo.Monitors)) - for _, monitor := range clusterInfo.Monitors { - monEndpoints = append(monEndpoints, monitor.Endpoint) - } - return strings.Join(monEndpoints, ","), keyringFile.Name(), nil -} - -// FindDevicePath polls and wait for the mapped ceph image device to show up -func (f *devicePathFinder) FindDevicePath(image, pool, clusterNamespace string) (string, error) { - mappedFile, err := cephutil.FindRBDMappedFile(image, pool, cephutil.RBDSysBusPathDefault) - if err != nil { - return "", errors.Wrap(err, "failed to find mapped image") - } - - if mappedFile != "" { - devicePath := cephutil.RBDDevicePathPrefix + mappedFile - if _, err := os.Lstat(devicePath); err != nil { - return "", errors.Errorf("sysfs information for image %q in pool %q found but the rbd device path %s does not exist", image, pool, devicePath) - } - return devicePath, nil - } - return "", nil -} diff --git a/pkg/daemon/ceph/agent/flexvolume/manager/ceph/manager_test.go b/pkg/daemon/ceph/agent/flexvolume/manager/ceph/manager_test.go deleted file mode 100644 index 6d1ae9f1e..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/manager/ceph/manager_test.go +++ /dev/null @@ -1,319 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package ceph - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path" - "strings" - "testing" - "time" - - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type fakeDevicePathFinder struct { - response []string - called int -} - -func (f *fakeDevicePathFinder) FindDevicePath(image, pool, clusterNamespace string) (string, error) { - response := f.response[f.called] - f.called++ - return response, nil -} - -func TestInitLoadRBDModSingleMajor(t *testing.T) { - modInfoCalled := false - modprobeCalled := false - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - assert.Equal(t, "modinfo", command) - assert.Equal(t, "rbd", args[2]) - modInfoCalled = true - return "single_major:Use a single major number for all rbd devices (default: false) (bool)", nil - }, - MockExecuteCommand: func(command string, args ...string) error { - assert.Equal(t, "modprobe", command) - assert.Equal(t, "rbd", args[0]) - assert.Equal(t, "single_major=Y", args[1]) - modprobeCalled = true - return nil - }, - } - - context := &clusterd.Context{ - Executor: executor, - } - _, err := NewVolumeManager(context) - assert.NoError(t, err) - assert.True(t, modInfoCalled) - assert.True(t, modprobeCalled) -} - -func TestInitLoadRBDModNoSingleMajor(t *testing.T) { - modInfoCalled := false - modprobeCalled := false - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - assert.Equal(t, "modinfo", command) - assert.Equal(t, "rbd", args[2]) - modInfoCalled = true - return "", nil - }, - MockExecuteCommand: func(command string, args ...string) error { - assert.Equal(t, "modprobe", command) - assert.Equal(t, 1, len(args)) - assert.Equal(t, "rbd", args[0]) - modprobeCalled = true - return nil - }, - } - - context := &clusterd.Context{ - Executor: executor, - } - _, err := NewVolumeManager(context) - assert.NoError(t, err) - assert.True(t, modInfoCalled) - assert.True(t, modprobeCalled) -} - -func TestAttach(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - clusterNamespace := "testCluster" - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - cm := &v1.ConfigMap{ - Data: map[string]string{ - "data": "rook-ceph-mon0=10.0.0.1:6789,rook-ceph-mon1=10.0.0.2:6789,rook-ceph-mon2=10.0.0.3:6789", - }, - } - cm.Name = "rook-ceph-mon-endpoints" - _, err := clientset.CoreV1().ConfigMaps(clusterNamespace).Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - - runCount := 1 - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, clusterNamespace)) - assert.Nil(t, err) - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - assert.Equal(t, "rbd", command) - assert.Equal(t, "map", args[0]) - assert.Equal(t, fmt.Sprintf("testpool/image%d", runCount), args[1]) - if runCount == 1 { - assert.Equal(t, "--id=admin", 
args[2]) - } else { - assert.Equal(t, "--id=user1", args[2]) - } - assert.Equal(t, "--cluster=testCluster", args[3]) - assert.True(t, strings.HasPrefix(args[4], "--keyring=")) - assert.Contains(t, args[6], "10.0.0.1:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.1:6789")) - assert.Contains(t, args[6], "10.0.0.2:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.2:6789")) - assert.Contains(t, args[6], "10.0.0.3:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.3:6789")) - runCount++ - return "", nil - }, - } - - context := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - ConfigDir: configDir, - } - vm := &VolumeManager{ - context: context, - devicePathFinder: &fakeDevicePathFinder{ - response: []string{"", "/dev/rbd3"}, - called: 0, - }, - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - _, _, _, err = mon.CreateOrLoadClusterInfo(context, clusterNamespace, ownerInfo) - assert.NoError(t, err) - devicePath, err := vm.Attach("image1", "testpool", "admin", "never-gonna-give-you-up", clusterNamespace) - assert.Equal(t, "/dev/rbd3", devicePath) - assert.Nil(t, err) - - vm = &VolumeManager{ - context: context, - devicePathFinder: &fakeDevicePathFinder{ - response: []string{"", "/dev/rbd4"}, - called: 0, - }, - } - - devicePath, err = vm.Attach("image2", "testpool", "user1", "never-gonna-let-you-down", clusterNamespace) - assert.Equal(t, "/dev/rbd4", devicePath) - assert.Nil(t, err) -} - -func TestAttachAlreadyExists(t *testing.T) { - vm := &VolumeManager{ - context: &clusterd.Context{}, - devicePathFinder: &fakeDevicePathFinder{ - response: []string{"/dev/rbd3"}, - called: 0, - }, - } - devicePath, err := vm.Attach("image1", "testpool", "admin", "never-gonna-run-around-and-desert-you ", "testCluster") - assert.Equal(t, "/dev/rbd3", devicePath) - assert.Nil(t, err) -} - -func TestDetach(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - clusterNamespace := "testCluster" - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - cm := &v1.ConfigMap{ - Data: map[string]string{ - "data": "rook-ceph-mon0=10.0.0.1:6789,rook-ceph-mon1=10.0.0.2:6789,rook-ceph-mon2=10.0.0.3:6789", - }, - } - cm.Name = "rook-ceph-mon-endpoints" - _, err := clientset.CoreV1().ConfigMaps(clusterNamespace).Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, clusterNamespace)) - assert.Nil(t, err) - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - assert.Equal(t, "rbd", command) - assert.Equal(t, "unmap", args[0]) - assert.Equal(t, "testpool/image1", args[1]) - assert.Equal(t, "--id=admin", args[2]) - assert.Equal(t, "--cluster=testCluster", args[3]) - assert.True(t, strings.HasPrefix(args[4], "--keyring=")) - assert.Contains(t, args[6], "10.0.0.1:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.1:6789")) - assert.Contains(t, args[6], "10.0.0.2:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.2:6789")) - assert.Contains(t, args[6], "10.0.0.3:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.3:6789")) - return "", nil - }, - } - - context := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - ConfigDir: configDir, - } 
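
Aside on the test doubles used above: fakeDevicePathFinder hands back a scripted sequence of responses and counts its calls, which lets a test simulate "not mapped yet, then mapped" without touching sysfs. A minimal standalone sketch of that pattern follows; the names and the polling helper are illustrative, not part of this codebase.

package main

import "fmt"

// scriptedPathFinder returns pre-canned answers in order, mimicking the
// fakeDevicePathFinder above: an empty string means "not mapped yet".
type scriptedPathFinder struct {
	responses []string
	calls     int
}

func (f *scriptedPathFinder) FindDevicePath(image, pool, clusterNamespace string) (string, error) {
	r := f.responses[f.calls]
	f.calls++
	return r, nil
}

// waitForDevice polls the finder until it reports a path, loosely following
// the retry loop in Attach.
func waitForDevice(f *scriptedPathFinder, image, pool, ns string, maxTries int) (string, error) {
	for i := 0; i < maxTries; i++ {
		path, err := f.FindDevicePath(image, pool, ns)
		if err != nil {
			return "", err
		}
		if path != "" {
			return path, nil
		}
	}
	return "", fmt.Errorf("device for %s/%s never appeared", pool, image)
}

func main() {
	finder := &scriptedPathFinder{responses: []string{"", "", "/dev/rbd3"}}
	fmt.Println(waitForDevice(finder, "image1", "testpool", "rook-ceph", 5))
}
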
- vm := &VolumeManager{ - context: context, - devicePathFinder: &fakeDevicePathFinder{ - response: []string{"/dev/rbd3"}, - called: 0, - }, - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - _, _, _, err = mon.CreateOrLoadClusterInfo(context, clusterNamespace, ownerInfo) - assert.NoError(t, err) - err = vm.Detach("image1", "testpool", "admin", "", clusterNamespace, false) - assert.Nil(t, err) -} - -func TestDetachCustomKeyring(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - clusterNamespace := "testCluster" - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - cm := &v1.ConfigMap{ - Data: map[string]string{ - "data": "rook-ceph-mon0=10.0.0.1:6789,rook-ceph-mon1=10.0.0.2:6789,rook-ceph-mon2=10.0.0.3:6789", - }, - } - cm.Name = "rook-ceph-mon-endpoints" - _, err := clientset.CoreV1().ConfigMaps(clusterNamespace).Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, clusterNamespace)) - assert.Nil(t, err) - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - assert.Equal(t, "rbd", command) - assert.Equal(t, "unmap", args[0]) - assert.Equal(t, "testpool/image1", args[1]) - assert.Equal(t, "--id=user1", args[2]) - assert.Equal(t, "--cluster=testCluster", args[3]) - assert.True(t, strings.HasPrefix(args[4], "--keyring=")) - assert.Contains(t, args[6], "10.0.0.1:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.1:6789")) - assert.Contains(t, args[6], "10.0.0.2:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.2:6789")) - assert.Contains(t, args[6], "10.0.0.3:6789", fmt.Sprintf("But '%s' does contain '%s'", args[6], "10.0.0.3:6789")) - return "", nil - }, - } - - context := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - ConfigDir: configDir, - } - vm := &VolumeManager{ - context: context, - devicePathFinder: &fakeDevicePathFinder{ - response: []string{"/dev/rbd3"}, - called: 0, - }, - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - _, _, _, err = mon.CreateOrLoadClusterInfo(context, clusterNamespace, ownerInfo) - assert.NoError(t, err) - err = vm.Detach("image1", "testpool", "user1", "", clusterNamespace, false) - assert.Nil(t, err) -} - -func TestAlreadyDetached(t *testing.T) { - vm := &VolumeManager{ - context: &clusterd.Context{}, - devicePathFinder: &fakeDevicePathFinder{ - response: []string{""}, - called: 0, - }, - } - err := vm.Detach("image1", "testpool", "admin", "", "testCluster", false) - assert.Nil(t, err) -} diff --git a/pkg/daemon/ceph/agent/flexvolume/manager/fake.go b/pkg/daemon/ceph/agent/flexvolume/manager/fake.go deleted file mode 100644 index e2fd7283c..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/manager/fake.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package manager - -import "fmt" - -// FakeVolumeManager represents a fake (mocked) implementation of the VolumeManager interface for testing. -type FakeVolumeManager struct { - FakeInit func() error - FakeAttach func(image, pool, id, key, clusterName string) (string, error) - FakeDetach func(image, pool, clusterName string, force bool) error - FakeExpand func(image, pool, clusterName string, size uint64) error -} - -// Init initializes the FakeVolumeManager -func (f *FakeVolumeManager) Init() error { - if f.FakeInit != nil { - return f.FakeInit() - } - return nil -} - -// Attach a volume image to the node -func (f *FakeVolumeManager) Attach(image, pool, id, key, clusterName string) (string, error) { - if f.FakeAttach != nil { - return f.FakeAttach(image, pool, id, key, clusterName) - } - return fmt.Sprintf("/%s/%s/%s", image, pool, clusterName), nil -} - -// Detach a volume image from a node -func (f *FakeVolumeManager) Detach(image, pool, id, key, clusterName string, force bool) error { - if f.FakeDetach != nil { - return f.FakeDetach(image, pool, clusterName, force) - } - return nil -} - -func (f *FakeVolumeManager) Expand(image, pool, clusterName string, size uint64) error { - if f.FakeExpand != nil { - return f.FakeExpand(image, pool, clusterName, size) - } - return nil -} diff --git a/pkg/daemon/ceph/agent/flexvolume/server.go b/pkg/daemon/ceph/agent/flexvolume/server.go deleted file mode 100644 index baebeb07d..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/server.go +++ /dev/null @@ -1,312 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package flexvolume to manage Kubernetes storage attach events. 
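
Aside on fake.go above: FakeVolumeManager is a function-field test double, so a test overrides only the calls it cares about while everything else falls back to a benign default. A minimal self-contained sketch of that pattern; the trimmed VolumeManager interface and the example values below are illustrative stand-ins, not the deleted Rook types.

package main

import "fmt"

// VolumeManager is a trimmed stand-in for the interface implemented above
// (the real one also has Init, Detach and Expand).
type VolumeManager interface {
	Attach(image, pool, id, key, clusterName string) (string, error)
}

// FakeVolumeManager overrides behaviour only when the function field is set,
// otherwise it falls back to a harmless default, as fake.go does.
type FakeVolumeManager struct {
	FakeAttach func(image, pool, id, key, clusterName string) (string, error)
}

func (f *FakeVolumeManager) Attach(image, pool, id, key, clusterName string) (string, error) {
	if f.FakeAttach != nil {
		return f.FakeAttach(image, pool, id, key, clusterName)
	}
	return fmt.Sprintf("/%s/%s/%s", image, pool, clusterName), nil
}

func main() {
	// Default behaviour: the fallback path string is returned.
	var vm VolumeManager = &FakeVolumeManager{}
	fmt.Println(vm.Attach("image1", "pool1", "admin", "key", "cluster1"))

	// Scripted behaviour: a test overrides just the call it cares about.
	vm = &FakeVolumeManager{
		FakeAttach: func(image, pool, id, key, clusterName string) (string, error) {
			return "/dev/rbd0", nil
		},
	}
	fmt.Println(vm.Attach("image1", "pool1", "admin", "key", "cluster1"))
}
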
-package flexvolume - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "net/rpc" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/agent" - "github.com/rook/rook/pkg/operator/k8sutil" -) - -const ( - UnixSocketName = ".rook.sock" - FlexvolumeVendor = "ceph.rook.io" - FlexvolumeVendorLegacy = "rook.io" - FlexDriverName = "rook" - flexvolumeDriverFileName = "rookflex" - flexMountPath = "/flexmnt/%s~%s" - usrBinDir = "/usr/local/bin/" - settingsFilename = "flex.config" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "flexvolume") - -// FlexvolumeServer start a unix domain socket server to interact with the flexvolume driver -type FlexvolumeServer struct { - context *clusterd.Context - controller *Controller - listeners map[string]net.Listener -} - -// NewFlexvolumeServer creates an Flexvolume server -func NewFlexvolumeServer(context *clusterd.Context, controller *Controller) *FlexvolumeServer { - return &FlexvolumeServer{ - context: context, - controller: controller, - listeners: make(map[string]net.Listener), - } -} - -// Start configures the flexvolume driver on the host and starts the unix domain socket server to communicate with the driver -func (s *FlexvolumeServer) Start(driverVendor, driverName string) error { - driverFile := path.Join(getRookFlexBinaryPath(), flexvolumeDriverFileName) - // /flexmnt/rook.io~rook-system - flexVolumeDriverDir := fmt.Sprintf(flexMountPath, driverVendor, driverName) - - err := configureFlexVolume(driverFile, flexVolumeDriverDir, driverName) - if err != nil { - return errors.Wrapf(err, "unable to configure flexvolume %s", flexVolumeDriverDir) - } - - unixSocketFile := path.Join(flexVolumeDriverDir, UnixSocketName) // /flextmnt/rook.io~rook-system/.rook.sock - if _, ok := s.listeners[unixSocketFile]; ok { - logger.Infof("flex server already running at %s", unixSocketFile) - return nil - } - - // remove unix socket if it existed previously - if _, err := os.Stat(unixSocketFile); !os.IsNotExist(err) { - logger.Info("Deleting unix domain socket file.") - if err := os.Remove(unixSocketFile); err != nil { - logger.Errorf("failed to remove unix socket file. %v", err) - } - } - - listener, err := net.Listen("unix", unixSocketFile) - if err != nil { - return errors.Wrapf(err, "unable to listen at %q", unixSocketFile) - } - s.listeners[unixSocketFile] = listener - - // #nosec since unixSocketFile needs the permission to execute - if err := os.Chmod(unixSocketFile, 0750); err != nil { - return errors.Wrapf(err, "unable to set file permission to unix socket %q", unixSocketFile) - } - - go rpc.Accept(listener) - - logger.Infof("listening on unix socket for Kubernetes volume attach commands %q", unixSocketFile) - return nil -} - -// StopAll Stop the unix domain socket server and deletes the socket file -func (s *FlexvolumeServer) StopAll() { - logger.Infof("Stopping %d unix socket rpc server(s).", len(s.listeners)) - for unixSocketFile, listener := range s.listeners { - if err := listener.Close(); err != nil { - logger.Errorf("failed to stop unix socket rpc server. %v", err) - } - - // closing the listener should remove the unix socket file. But lets try it remove it just in case. 
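
For context on the socket handling in Start above: the server exposes Go net/rpc over a unix domain socket, removing any stale socket file before listening and then serving connections with rpc.Accept. A minimal sketch of that pattern, assuming an illustrative Echo service rather than the real flexvolume Controller.

package main

import (
	"fmt"
	"log"
	"net"
	"net/rpc"
	"os"
	"path/filepath"
)

// Echo is a stand-in RPC service; the real server registers a flexvolume Controller.
type Echo struct{}

func (e *Echo) Ping(msg string, reply *string) error {
	*reply = "pong: " + msg
	return nil
}

func main() {
	socket := filepath.Join(os.TempDir(), ".sketch.sock")
	_ = os.Remove(socket) // clear a stale socket from a previous run, as Start does

	if err := rpc.Register(&Echo{}); err != nil {
		log.Fatal(err)
	}
	listener, err := net.Listen("unix", socket)
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()
	go rpc.Accept(listener) // serve connections in the background, as in Start()

	// Client side: the flex driver binary would dial the same socket path.
	client, err := rpc.Dial("unix", socket)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var reply string
	if err := client.Call("Echo.Ping", "hello", &reply); err != nil {
		log.Fatal(err)
	}
	fmt.Println(reply) // pong: hello
}
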
- if _, err := os.Stat(unixSocketFile); !os.IsNotExist(err) { - logger.Infof("deleting unix domain socket file %q.", unixSocketFile) - if err := os.Remove(unixSocketFile); err != nil { - logger.Errorf("failed to delete unix domain socker file. %v", err) - } - } - } - s.listeners = make(map[string]net.Listener) -} - -// RookDriverName return the Kubernetes version appropriate Rook driver name -func RookDriverName(context *clusterd.Context) (string, error) { - // the driver name needs to be the same as the namespace so that we can support multiple namespaces - // without the drivers conflicting with each other - return os.Getenv(k8sutil.PodNamespaceEnvVar), nil -} - -// TouchFlexDrivers causes k8s to reload the flex volumes. Needed periodically due to a k8s race condition with flex driver loading. -func TouchFlexDrivers(vendor, driverName string) { - filename := path.Join(fmt.Sprintf(flexMountPath, vendor, driverName), driverName) - logger.Debugf("reloading flex drivers. touching %q", filename) - - currenttime := time.Now().Local() - err := os.Chtimes(filename, currenttime, currenttime) - if err != nil { - logger.Warningf("failed to touch file %s", filename) - } -} - -// Encode the flex settings in json -func generateFlexSettings(enableSELinuxRelabeling, enableFSGroup bool) ([]byte, error) { - status := driverStatus{ - Status: statusSuccess, - Capabilities: &driverCapabilities{ - Attach: false, - // Required for metrics - SupportsMetrics: true, - // Required for any mount performed on a host running selinux - SELinuxRelabel: enableSELinuxRelabeling, - FSGroup: enableFSGroup, - RequiresFSResize: true, - }, - } - result, err := json.Marshal(status) - if err != nil { - return nil, errors.Wrap(err, "Invalid flex settings") - } - return result, nil -} - -// The flex settings must be loaded from a file next to the flex driver since there is context -// that can be used other than the directory where the flex driver is running. -// This method cannot write to stdout since it is running in the context of the kubelet -// which only expects the json settings to be output. -func LoadFlexSettings(directory string) []byte { - // Load the settings from the expected config file, ensure they are valid settings, then return them in - // a json string to the caller - var status driverStatus - if output, err := ioutil.ReadFile(filepath.Clean(path.Join(directory, settingsFilename))); err == nil { - if err := json.Unmarshal(output, &status); err == nil { - if output, err = json.Marshal(status); err == nil { - return output - } - } - } - - // If there is an error loading settings, set the defaults - settings, err := generateFlexSettings(true, true) - if err != nil { - // Never expect this to happen since we'll validate settings in the build - return nil - } - return settings -} - -func configureFlexVolume(driverFile, driverDir, driverName string) error { - // copying flex volume - if _, err := os.Stat(driverDir); os.IsNotExist(err) { - err := os.Mkdir(driverDir, 0750) - if err != nil { - logger.Errorf("failed to create dir %q. %v", driverDir, err) - } - } - - destFile := path.Join(driverDir, "."+driverName) // /flextmnt/rook.io~rook-system/.rook-system - finalDestFile := path.Join(driverDir, driverName) // /flextmnt/rook.io~rook-system/rook-system - err := copyFile(driverFile, destFile) - if err != nil { - return errors.Wrapf(err, "unable to copy flexvolume from %q to %q", driverFile, destFile) - } - - // renaming flex volume. Rename is an atomic execution while copying is not. 
- if _, err := os.Stat(finalDestFile); !os.IsNotExist(err) { - // Delete old plugin if it exists - err = os.Remove(finalDestFile) - if err != nil { - logger.Warningf("Could not delete old Rook Flexvolume driver at %q. %v", finalDestFile, err) - } - - } - - if err := os.Rename(destFile, finalDestFile); err != nil { - return errors.Wrapf(err, "failed to rename %q to %q", destFile, finalDestFile) - } - - // Write the flex configuration - enableSELinuxRelabeling, err := strconv.ParseBool(os.Getenv(agent.RookEnableSelinuxRelabelingEnv)) - if err != nil { - logger.Errorf("invalid value for disabling SELinux relabeling. %v", err) - enableSELinuxRelabeling = true - } - enableFSGroup, err := strconv.ParseBool(os.Getenv(agent.RookEnableFSGroupEnv)) - if err != nil { - logger.Errorf("invalid value for disabling fs group. %v", err) - enableFSGroup = true - } - settings, err := generateFlexSettings(enableSELinuxRelabeling, enableFSGroup) - if err != nil { - logger.Errorf("invalid flex settings. %v", err) - } else { - // #nosec since the flex settings need read permissions - if err := ioutil.WriteFile(path.Join(driverDir, settingsFilename), settings, 0644); err != nil { - logger.Errorf("failed to write settings file %q. %v", settingsFilename, err) - } else { - logger.Debugf("flex settings: %q", string(settings)) - } - } - - return nil -} - -// #nosec G307 Calling defer to close the file without checking the error return is not a risk for a simple file open and close -func copyFile(src, dest string) error { - srcFile, err := os.Open(filepath.Clean(src)) - if err != nil { - return errors.Wrapf(err, "error opening source file %s", src) - } - defer srcFile.Close() - - // #nosec G302,G304 since destFile needs the permission to execute - destFile, err := os.OpenFile(dest, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0750) // creates if file doesn't exist - if err != nil { - return errors.Wrapf(err, "error creating destination file %s", dest) - } - defer destFile.Close() - - _, err = io.Copy(destFile, srcFile) - if err != nil { - return errors.Wrapf(err, "error copying file from %s to %s", src, dest) - } - err = destFile.Sync() - if err := destFile.Close(); err != nil { - return err - } - return err -} - -// Gets the flex driver info (vendor, driver name) from a given path where the flex driver exists. -// The given path may look something like this: -// /usr/libexec/kubernetes/kubelet-plugins/volume/exec/rook.io~rook/rook/ -// In which case, the vendor is rook.io and the driver name is rook. 
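
A standalone sketch of the install pattern used by configureFlexVolume and copyFile above: copy the driver to a dot-prefixed temporary name inside the destination directory, then os.Rename it into place, since the rename is atomic within a filesystem while the copy is not. Paths and names below are illustrative.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// installAtomically copies src into destDir under finalName without exposing a
// half-written file: data goes to a hidden temp file first, then os.Rename
// swaps it into place.
func installAtomically(src, destDir, finalName string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	tmp := filepath.Join(destDir, "."+finalName)
	out, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0750)
	if err != nil {
		return err
	}
	if _, err := io.Copy(out, in); err != nil {
		out.Close()
		return err
	}
	if err := out.Sync(); err != nil {
		out.Close()
		return err
	}
	if err := out.Close(); err != nil {
		return err
	}
	return os.Rename(tmp, filepath.Join(destDir, finalName))
}

func main() {
	dir, _ := ioutil.TempDir("", "flex-install")
	defer os.RemoveAll(dir)
	src := filepath.Join(dir, "rookflex")
	_ = ioutil.WriteFile(src, []byte("#!/bin/sh\necho driver\n"), 0750)

	if err := installAtomically(src, dir, "rook"); err != nil {
		fmt.Println("install failed:", err)
		return
	}
	fmt.Println("installed", filepath.Join(dir, "rook"))
}
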
-func getFlexDriverInfo(flexDriverPath string) (vendor, driver string, err error) { - parts := strings.Split(flexDriverPath, string(os.PathSeparator)) - for i := len(parts) - 1; i >= 0; i-- { - p := parts[i] - matched, err := regexp.Compile(".+~.+") - if err == nil { - if matched.Match([]byte(p)) { - // found a match for the flex driver directory name pattern - flexInfo := strings.Split(p, "~") - if len(flexInfo) > 2 { - return "", "", errors.Errorf("unexpected number of items in flex driver info %+v from path %s", flexInfo, flexDriverPath) - } - - return flexInfo[0], flexInfo[1], nil - } - } - } - - return "", "", errors.Errorf("failed to find flex driver info from path %s", flexDriverPath) -} - -// getRookFlexBinaryPath returns the path of rook flex volume driver -func getRookFlexBinaryPath() string { - p, err := exec.LookPath(flexvolumeDriverFileName) - if err != nil { - return usrBinDir - } - return path.Dir(p) -} diff --git a/pkg/daemon/ceph/agent/flexvolume/server_test.go b/pkg/daemon/ceph/agent/flexvolume/server_test.go deleted file mode 100644 index fd1f52a24..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/server_test.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package flexvolume to manage Kubernetes storage attach events. 
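
The directory convention that getFlexDriverInfo above decodes is "vendor~driver" (for example rook.io~rook). A simplified sketch of the same parsing without regexp, assuming that convention; unlike the original, it silently skips malformed components rather than returning an error for them.

package main

import (
	"fmt"
	"os"
	"strings"
)

// parseFlexPath walks a kubelet flex plugin path from the end and returns the
// first "vendor~driver" component it finds.
func parseFlexPath(p string) (vendor, driver string, err error) {
	parts := strings.Split(p, string(os.PathSeparator))
	for i := len(parts) - 1; i >= 0; i-- {
		fields := strings.Split(parts[i], "~")
		if len(fields) == 2 && fields[0] != "" && fields[1] != "" {
			return fields[0], fields[1], nil
		}
	}
	return "", "", fmt.Errorf("no vendor~driver component in %q", p)
}

func main() {
	v, d, err := parseFlexPath("/usr/libexec/kubernetes/kubelet-plugins/volume/exec/rook.io~rook/rook")
	fmt.Println(v, d, err) // rook.io rook <nil>
}
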
-package flexvolume - -import ( - "encoding/json" - "io/ioutil" - "os" - "path" - "testing" - - "github.com/rook/rook/pkg/operator/ceph/agent" - "github.com/stretchr/testify/assert" -) - -func TestConfigureFlexVolume(t *testing.T) { - driverDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(driverDir) - driverFile := path.Join(driverDir, flexvolumeDriverFileName) - _, err := os.OpenFile(driverFile, os.O_RDONLY|os.O_CREATE, 0755) - assert.NoError(t, err) - - driverName := "rook" - os.Setenv("POD_NAMESPACE", driverName) - defer os.Setenv("POD_NAMESPACE", "") - os.Setenv(agent.RookEnableSelinuxRelabelingEnv, "false") - defer os.Setenv(agent.RookEnableSelinuxRelabelingEnv, "") - os.Setenv(agent.RookEnableFSGroupEnv, "false") - defer os.Setenv(agent.RookEnableFSGroupEnv, "") - err = configureFlexVolume(driverFile, driverDir, driverName) - assert.Nil(t, err) - _, err = os.Stat(path.Join(driverDir, "rook")) - assert.False(t, os.IsNotExist(err)) - - // verify the non-default settings - settings := LoadFlexSettings(driverDir) - var status driverStatus - err = json.Unmarshal(settings, &status) - assert.Nil(t, err) - assert.False(t, status.Capabilities.FSGroup) - assert.False(t, status.Capabilities.SELinuxRelabel) -} - -func TestGetFlexDriverInfo(t *testing.T) { - // empty string, can't do anything with that, this is an error - _, _, err := getFlexDriverInfo("") - assert.NotNil(t, err) - - // no driver dir found, this is an error - _, _, err = getFlexDriverInfo("/a/b/c") - assert.NotNil(t, err) - - // well formed flex driver path, driver dir is last dir - vendor, driver, err := getFlexDriverInfo("/usr/libexec/kubernetes/kubelet-plugins/volume/exec/foo.bar.baz~biz") - assert.Nil(t, err) - assert.Equal(t, "foo.bar.baz", vendor) - assert.Equal(t, "biz", driver) - - // well formed flex driver path, driver dir is last dir but there's a trailing path separator - vendor, driver, err = getFlexDriverInfo("/usr/libexec/kubernetes/kubelet-plugins/volume/exec/foo.bar.baz~biz/") - assert.Nil(t, err) - assert.Equal(t, "foo.bar.baz", vendor) - assert.Equal(t, "biz", driver) - - // well formed flex driver path, driver dir is not the last dir in the path - vendor, driver, err = getFlexDriverInfo("/usr/libexec/kubernetes/kubelet-plugins/volume/exec/foo.bar.baz~biz/another-folder") - assert.Nil(t, err) - assert.Equal(t, "foo.bar.baz", vendor) - assert.Equal(t, "biz", driver) - - // more flex volume info items than expected, this is an error - _, _, err = getFlexDriverInfo("/usr/libexec/kubernetes/kubelet-plugins/volume/exec/foo.bar.baz~biz~buzz/") - assert.NotNil(t, err) -} diff --git a/pkg/daemon/ceph/agent/flexvolume/types.go b/pkg/daemon/ceph/agent/flexvolume/types.go deleted file mode 100644 index c4ec11956..000000000 --- a/pkg/daemon/ceph/agent/flexvolume/types.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flexvolume - -const ( - // ReadOnly mount mode - ReadOnly = "ro" - // ReadWrite mount mode - ReadWrite = "rw" -) - -// VolumeManager handles flexvolume plugin storage operations -type VolumeManager interface { - Init() error - Attach(image, pool, id, key, clusterName string) (string, error) - Detach(image, pool, id, key, clusterName string, force bool) error - Expand(image, pool, clusterName string, size uint64) error -} - -type VolumeController interface { - Attach(attachOpts AttachOptions, devicePath *string) error - Detach(detachOpts AttachOptions, _ *struct{} /* void reply */) error - DetachForce(detachOpts AttachOptions, _ *struct{} /* void reply */) error - RemoveAttachmentObject(detachOpts AttachOptions, safeToDetach *bool) error - Log(message LogMessage, _ *struct{} /* void reply */) error - GetAttachInfoFromMountDir(mountDir string, attachOptions *AttachOptions) error -} - -type AttachOptions struct { - Image string `json:"image"` - BlockPool string `json:"blockPool"` - Pool string `json:"pool"` - ClusterNamespace string `json:"clusterNamespace"` - ClusterName string `json:"clusterName"` - StorageClass string `json:"storageClass"` - MountDir string `json:"mountDir"` - FsName string `json:"fsName"` - Path string `json:"path"` // Path within the CephFS to mount - MountUser string `json:"mountUser"` - MountSecret string `json:"mountSecret"` - RW string `json:"kubernetes.io/readwrite"` - FsType string `json:"kubernetes.io/fsType"` - FsGroup string `json:"kubernetes.io/fsGroup"` - VolumeName string `json:"kubernetes.io/pvOrVolumeName"` // only available on 1.7 - Pod string `json:"kubernetes.io/pod.name"` - PodID string `json:"kubernetes.io/pod.uid"` - PodNamespace string `json:"kubernetes.io/pod.namespace"` -} - -type ExpandOptions struct { - Pool string `json:"pool"` - RW string `json:"kubernetes.io/readwrite"` - ClusterNamespace string `json:"clusterNamespace"` - DataBlockPool string `json:"dataBlockPool"` - Image string `json:"image"` - FsType string `json:"kubernetes.io/fsType"` - StorageClass string `json:"storageClass"` - VolumeName string `json:"kubernetes.io/pvOrVolumeName"` -} - -type ExpandArgs struct { - ExpandOptions *ExpandOptions - Size uint64 -} - -type LogMessage struct { - Message string `json:"message"` - IsError bool `json:"isError"` -} - -type GlobalMountPathInput struct { - VolumeName string `json:"volumeName"` - DriverDir string `json:"driverDir"` -} diff --git a/pkg/daemon/ceph/cleanup/disk.go b/pkg/daemon/ceph/cleanup/disk.go deleted file mode 100644 index 18116b7c6..000000000 --- a/pkg/daemon/ceph/cleanup/disk.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cleanup - -import ( - "fmt" - "strconv" - "strings" - "sync" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/daemon/ceph/osd" - oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" -) - -const ( - shredUtility = "shred" - shredBS = "10M" // Shred's block size -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "cleanup") -) - -// DiskSanitizer is simple struct to old the context to execute the commands -type DiskSanitizer struct { - context *clusterd.Context - clusterInfo *client.ClusterInfo - sanitizeDisksSpec *cephv1.SanitizeDisksSpec -} - -// NewDiskSanitizer is function that returns a full filled DiskSanitizer object -func NewDiskSanitizer(context *clusterd.Context, clusterInfo *client.ClusterInfo, sanitizeDisksSpec *cephv1.SanitizeDisksSpec) *DiskSanitizer { - return &DiskSanitizer{ - context: context, - clusterInfo: clusterInfo, - sanitizeDisksSpec: sanitizeDisksSpec, - } -} - -// StartSanitizeDisks main entrypoint of the cleanup package -func (s *DiskSanitizer) StartSanitizeDisks() { - // LVM based OSDs - osdLVMList, err := osd.GetCephVolumeLVMOSDs(s.context, s.clusterInfo, s.clusterInfo.FSID, "", false, false) - if err != nil { - logger.Errorf("failed to list lvm osd(s). %v", err) - } else { - // Start the sanitizing sequence - s.sanitizeLVMDisk(osdLVMList) - } - - // Raw based OSDs - osdRawList, err := osd.GetCephVolumeRawOSDs(s.context, s.clusterInfo, s.clusterInfo.FSID, "", "", "", false, true) - if err != nil { - logger.Errorf("failed to list raw osd(s). %v", err) - } else { - // Start the sanitizing sequence - s.sanitizeRawDisk(osdRawList) - } -} - -func (s *DiskSanitizer) sanitizeRawDisk(osdRawList []oposd.OSDInfo) { - // Initialize work group to wait for completion of all the go routine - var wg sync.WaitGroup - - for _, osd := range osdRawList { - logger.Infof("sanitizing osd %d disk %q", osd.ID, osd.BlockPath) - - // Increment the wait group counter - wg.Add(1) - - // Put each sanitize in a go routine to speed things up - go s.executeSanitizeCommand(osd.BlockPath, &wg) - } - - wg.Wait() -} - -func (s *DiskSanitizer) sanitizeLVMDisk(osdLVMList []oposd.OSDInfo) { - // Initialize work group to wait for completion of all the go routine - var wg sync.WaitGroup - pvs := []string{} - - for _, osd := range osdLVMList { - // Increment the wait group counter - wg.Add(1) - - // Lookup the PV associated to the LV - pvs = append(pvs, s.returnPVDevice(osd.BlockPath)[0]) - - // run c-v - go s.wipeLVM(osd.ID, &wg) - } - // Wait for ceph-volume to finish before wiping the remaining Physical Volume data - wg.Wait() - - var wg2 sync.WaitGroup - // // purge remaining LVM2 metadata from PV - for _, pv := range pvs { - wg2.Add(1) - go s.executeSanitizeCommand(pv, &wg2) - } - wg2.Wait() -} - -func (s *DiskSanitizer) wipeLVM(osdID int, wg *sync.WaitGroup) { - // On return, notify the WaitGroup that we’re done - defer wg.Done() - - output, err := s.context.Executor.ExecuteCommandWithCombinedOutput("stdbuf", "-oL", "ceph-volume", "lvm", "zap", "--osd-id", strconv.Itoa(osdID), "--destroy") - if err != nil { - logger.Errorf("failed to sanitize osd %d. %s. 
%v", osdID, output, err) - } - - logger.Infof("%s\n", output) - logger.Infof("successfully sanitized lvm osd %d", osdID) -} - -func (s *DiskSanitizer) returnPVDevice(disk string) []string { - output, err := s.context.Executor.ExecuteCommandWithOutput("lvs", disk, "-o", "seg_pe_ranges", "--noheadings") - if err != nil { - logger.Errorf("failed to execute lvs command. %v", err) - return []string{} - } - - logger.Infof("output: %s", output) - return strings.Split(output, ":") -} - -func (s *DiskSanitizer) buildDataSource() string { - return fmt.Sprintf("/dev/%s", s.sanitizeDisksSpec.DataSource.String()) -} - -func (s *DiskSanitizer) buildShredArgs(disk string) []string { - var shredArgs []string - - // If data source is not zero, then let's add zeros at the end of the pass - if s.sanitizeDisksSpec.DataSource != cephv1.SanitizeDataSourceZero { - shredArgs = append(shredArgs, "--zero") - } - - // If this is a quick pass let's just overwrite the first 10MB - if s.sanitizeDisksSpec.Method == cephv1.SanitizeMethodQuick { - shredArgs = append(shredArgs, fmt.Sprintf("--size=%s", shredBS)) - } - - // If the data source for randomness is zero - if s.sanitizeDisksSpec.DataSource == cephv1.SanitizeDataSourceZero { - shredArgs = append(shredArgs, fmt.Sprintf("--random-source=%s", s.buildDataSource())) - } - - shredArgs = append(shredArgs, []string{ - "--force", - "--verbose", - fmt.Sprintf("--iterations=%s", strconv.Itoa(int(s.sanitizeDisksSpec.Iteration))), - disk}...) - - return shredArgs -} - -func (s *DiskSanitizer) executeSanitizeCommand(disk string, wg *sync.WaitGroup) { - // On return, notify the WaitGroup that we’re done - defer wg.Done() - - output, err := s.context.Executor.ExecuteCommandWithCombinedOutput(shredUtility, s.buildShredArgs(disk)...) - if err != nil { - logger.Errorf("failed to sanitize osd disk %q. %s. %v", disk, output, err) - } - - logger.Infof("%s\n", output) - logger.Infof("successfully sanitized osd disk %q", disk) -} diff --git a/pkg/daemon/ceph/cleanup/disk_test.go b/pkg/daemon/ceph/cleanup/disk_test.go deleted file mode 100644 index 73667bc23..000000000 --- a/pkg/daemon/ceph/cleanup/disk_test.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cleanup - -import ( - "reflect" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/stretchr/testify/assert" -) - -func TestBuildDataSource(t *testing.T) { - s := NewDiskSanitizer(&clusterd.Context{}, &client.ClusterInfo{}, &cephv1.SanitizeDisksSpec{}) - s.sanitizeDisksSpec.DataSource = cephv1.SanitizeDataSourceZero - - assert.Equal(t, "/dev/zero", s.buildDataSource()) -} - -func TestBuildShredArgs(t *testing.T) { - var i int32 = 1 - c := &clusterd.Context{} - disk := "/dev/sda" - type fields struct { - context *clusterd.Context - clusterInfo *client.ClusterInfo - sanitizeDisksSpec *cephv1.SanitizeDisksSpec - } - tests := []struct { - name string - fields fields - disk string - want []string - }{ - {"quick-zero", fields{c, &client.ClusterInfo{}, &cephv1.SanitizeDisksSpec{Method: cephv1.SanitizeMethodQuick, Iteration: i, DataSource: cephv1.SanitizeDataSourceZero}}, disk, []string{"--size=10M", "--random-source=/dev/zero", "--force", "--verbose", "--iterations=1", "/dev/sda"}}, - {"quick-random", fields{c, &client.ClusterInfo{}, &cephv1.SanitizeDisksSpec{Method: cephv1.SanitizeMethodQuick, Iteration: i, DataSource: cephv1.SanitizeDataSourceRandom}}, disk, []string{"--zero", "--size=10M", "--force", "--verbose", "--iterations=1", "/dev/sda"}}, - {"complete-zero", fields{c, &client.ClusterInfo{}, &cephv1.SanitizeDisksSpec{Method: cephv1.SanitizeMethodComplete, Iteration: i, DataSource: cephv1.SanitizeDataSourceZero}}, disk, []string{"--random-source=/dev/zero", "--force", "--verbose", "--iterations=1", "/dev/sda"}}, - {"complete-random", fields{c, &client.ClusterInfo{}, &cephv1.SanitizeDisksSpec{Method: cephv1.SanitizeMethodComplete, Iteration: i, DataSource: cephv1.SanitizeDataSourceRandom}}, disk, []string{"--zero", "--force", "--verbose", "--iterations=1", "/dev/sda"}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &DiskSanitizer{ - context: tt.fields.context, - clusterInfo: tt.fields.clusterInfo, - sanitizeDisksSpec: tt.fields.sanitizeDisksSpec, - } - if got := s.buildShredArgs(tt.disk); !reflect.DeepEqual(got, tt.want) { - t.Errorf("DiskSanitizer.buildShredArgs() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/daemon/ceph/cleanup/hostpath.go b/pkg/daemon/ceph/cleanup/hostpath.go deleted file mode 100644 index 88eb33789..000000000 --- a/pkg/daemon/ceph/cleanup/hostpath.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cleanup - -import ( - "io/ioutil" - "os" - "path" - "path/filepath" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" -) - -// StartHostPathCleanup is the main entrypoint function to clean up dataDirHostPath and monitor store -func StartHostPathCleanup(namespaceDir, dataDirHostPath, monSecret string) { - cleanupDirPath := path.Join(dataDirHostPath, namespaceDir) - if err := os.RemoveAll(cleanupDirPath); err != nil { - logger.Errorf("failed to clean up %q directory. %v", cleanupDirPath, err) - } else { - logger.Infof("successfully cleaned up %q directory", cleanupDirPath) - } - - cleanMonDirs(dataDirHostPath, monSecret) -} - -func cleanMonDirs(dataDirHostPath, monSecret string) { - monDirs, err := filepath.Glob(path.Join(dataDirHostPath, "mon-*")) - if err != nil { - logger.Errorf("failed to find the mon directories on the dataDirHostPath %q. %v", dataDirHostPath, err) - return - } - - if len(monDirs) == 0 { - logger.Infof("no mon directories are available for clean up in the dataDirHostPath %q", dataDirHostPath) - return - } - - for _, monDir := range monDirs { - // Clean up mon directory only if mon secret matches with that in the keyring file. - deleteMonDir, err := secretKeyMatch(monDir, monSecret) - if err != nil { - logger.Errorf("failed to clean up the mon directory %q on the dataDirHostPath %q. %v", monDir, dataDirHostPath, err) - continue - } - if deleteMonDir { - if err := os.RemoveAll(monDir); err != nil { - logger.Errorf("failed to clean up the mon directory %q on the dataDirHostPath %q. %v", monDir, dataDirHostPath, err) - } else { - logger.Infof("successfully cleaned up the mon directory %q on the dataDirHostPath %q", monDir, dataDirHostPath) - } - } else { - logger.Infof("skipped clean up of the mon directory %q as the secret key did not match", monDir) - } - } -} - -func secretKeyMatch(monDir, monSecret string) (bool, error) { - keyringDirPath := path.Join(monDir, "/data/keyring") - if _, err := os.Stat(keyringDirPath); os.IsNotExist(err) { - return false, errors.Wrapf(err, "failed to read keyring %q for the mon directory %q", keyringDirPath, monDir) - } - contents, err := ioutil.ReadFile(filepath.Clean(keyringDirPath)) - if err != nil { - return false, errors.Wrapf(err, "failed to read keyring %q for the mon directory %q", keyringDirPath, monDir) - } - extractedKey, err := mon.ExtractKey(string(contents)) - if err != nil { - return false, errors.Wrapf(err, "failed to extract secret key from the keyring %q for the mon directory %q", keyringDirPath, monDir) - } - - return monSecret == extractedKey, nil -} diff --git a/pkg/daemon/ceph/client/auth.go b/pkg/daemon/ceph/client/auth.go deleted file mode 100644 index ab5eb7da5..000000000 --- a/pkg/daemon/ceph/client/auth.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package client - -import ( - "encoding/json" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" -) - -// AuthGetOrCreate will either get or create a user with the given capabilities. The keyring for the -// user will be written to the given keyring path. -func AuthGetOrCreate(context *clusterd.Context, clusterInfo *ClusterInfo, name, keyringPath string, caps []string) error { - logger.Infof("getting or creating ceph auth %q", name) - args := append([]string{"auth", "get-or-create", name, "-o", keyringPath}, caps...) - cmd := NewCephCommand(context, clusterInfo, args) - cmd.JsonOutput = false - _, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to auth get-or-create for %s", name) - } - - return nil -} - -// AuthGetKey gets the key for the given user. -func AuthGetKey(context *clusterd.Context, clusterInfo *ClusterInfo, name string) (string, error) { - logger.Infof("getting ceph auth key %q", name) - args := []string{"auth", "get-key", name} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return "", errors.Wrapf(err, "failed to get key for %s", name) - } - - return parseAuthKey(buf) -} - -// AuthGetOrCreateKey gets or creates the key for the given user. -func AuthGetOrCreateKey(context *clusterd.Context, clusterInfo *ClusterInfo, name string, caps []string) (string, error) { - logger.Infof("getting or creating ceph auth key %q", name) - args := append([]string{"auth", "get-or-create-key", name}, caps...) - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return "", errors.Wrapf(err, "failed get-or-create-key %s", name) - } - - return parseAuthKey(buf) -} - -// AuthUpdateCaps updates the capabilities for the given user. -func AuthUpdateCaps(context *clusterd.Context, clusterInfo *ClusterInfo, name string, caps []string) error { - logger.Infof("updating ceph auth caps %q to %v", name, caps) - args := append([]string{"auth", "caps", name}, caps...) - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to update caps for %s", name) - } - return err -} - -// AuthGetCaps gets the capabilities for the given user. -func AuthGetCaps(context *clusterd.Context, clusterInfo *ClusterInfo, name string) (caps map[string]string, error error) { - logger.Infof("getting ceph auth caps for %q", name) - args := []string{"auth", "get", name} - output, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to get caps for %q", name) - } - - var data []map[string]interface{} - err = json.Unmarshal(output, &data) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal auth get response") - } - caps = make(map[string]string) - - if data[0]["caps"].(map[string]interface{})["mon"] != nil { - caps["mon"] = data[0]["caps"].(map[string]interface{})["mon"].(string) - } - if data[0]["caps"].(map[string]interface{})["mds"] != nil { - caps["mds"] = data[0]["caps"].(map[string]interface{})["mds"].(string) - } - if data[0]["caps"].(map[string]interface{})["mgr"] != nil { - caps["mgr"] = data[0]["caps"].(map[string]interface{})["mgr"].(string) - } - if data[0]["caps"].(map[string]interface{})["osd"] != nil { - caps["osd"] = data[0]["caps"].(map[string]interface{})["osd"].(string) - } - - return caps, err -} - -// AuthDelete will delete the given user. 
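
Aside on the JSON handling in AuthGetCaps above: the nested interface{} assertions can be replaced with a small typed struct, assuming the same `ceph auth get` response shape the code above relies on (an array whose first element carries a "caps" map of strings). A minimal sketch with illustrative sample input:

package main

import (
	"encoding/json"
	"fmt"
)

// authEntry keeps only the field AuthGetCaps above actually reads; any extra
// fields in the response are ignored by encoding/json.
type authEntry struct {
	Caps map[string]string `json:"caps"`
}

// parseCaps is a typed alternative to the nested interface{} assertions above.
func parseCaps(output []byte) (map[string]string, error) {
	var entries []authEntry
	if err := json.Unmarshal(output, &entries); err != nil {
		return nil, fmt.Errorf("failed to unmarshal auth get response: %w", err)
	}
	if len(entries) == 0 {
		return nil, fmt.Errorf("auth get returned no entries")
	}
	return entries[0].Caps, nil
}

func main() {
	sample := []byte(`[{"caps":{"mon":"allow r","osd":"allow rwx"}}]`)
	caps, err := parseCaps(sample)
	fmt.Println(caps["mon"], caps["osd"], err)
}
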
-func AuthDelete(context *clusterd.Context, clusterInfo *ClusterInfo, name string) error { - logger.Infof("deleting ceph auth %q", name) - args := []string{"auth", "del", name} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to delete auth for %s", name) - } - return nil -} - -func parseAuthKey(buf []byte) (string, error) { - var resp map[string]interface{} - if err := json.Unmarshal(buf, &resp); err != nil { - return "", errors.Wrap(err, "failed to unmarshal get/create key response") - } - return resp["key"].(string), nil -} diff --git a/pkg/daemon/ceph/client/command.go b/pkg/daemon/ceph/client/command.go deleted file mode 100644 index fcfdea3ea..000000000 --- a/pkg/daemon/ceph/client/command.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "fmt" - "path" - "strconv" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/util/exec" -) - -// RunAllCephCommandsInToolboxPod - when running the e2e tests, all ceph commands need to be run in the toolbox. -// Everywhere else, the ceph tools are assumed to be in the container where we can shell out. -// This is the name of the pod. -var RunAllCephCommandsInToolboxPod string - -const ( - // AdminUsername is the name of the admin user - AdminUsername = "client.admin" - // CephTool is the name of the CLI tool for 'ceph' - CephTool = "ceph" - // RBDTool is the name of the CLI tool for 'rbd' - RBDTool = "rbd" - // Kubectl is the name of the CLI tool for 'kubectl' - Kubectl = "kubectl" - // CrushTool is the name of the CLI tool for 'crushtool' - CrushTool = "crushtool" - // DefaultPGCount will cause Ceph to use the internal default PG count - DefaultPGCount = "0" - // CommandProxyInitContainerName is the name of the init container for proxying ceph command when multus is used - CommandProxyInitContainerName = "cmd-proxy" - // ProxyAppLabel is the label used to identify the proxy container - ProxyAppLabel = "rook-ceph-mgr" -) - -// CephConfFilePath returns the location to the cluster's config file in the operator container. -func CephConfFilePath(configDir, clusterName string) string { - confFile := fmt.Sprintf("%s.config", clusterName) - return path.Join(configDir, clusterName, confFile) -} - -// FinalizeCephCommandArgs builds the command line to be called -func FinalizeCephCommandArgs(command string, clusterInfo *ClusterInfo, args []string, configDir string) (string, []string) { - // the rbd client tool does not support the '--connect-timeout' option - // so we only use it for the 'ceph' command - // Also, there is no point of adding that option to 'crushtool' since that CLI does not connect to anything - // 'crushtool' is a utility that lets you create, compile, decompile and test CRUSH map files. 
- - // we could use a slice and iterate over it but since we have only 3 elements - // I don't think this is worth a loop - timeout := strconv.Itoa(int(exec.CephCommandsTimeout.Seconds())) - if command != "rbd" && command != "crushtool" && command != "radosgw-admin" { - args = append(args, "--connect-timeout="+timeout) - } - - // If the command should be run inside the toolbox pod, include the kubectl args to call the toolbox - if RunAllCephCommandsInToolboxPod != "" { - toolArgs := []string{"exec", "-i", RunAllCephCommandsInToolboxPod, "-n", clusterInfo.Namespace, - "--", "timeout", timeout, command} - return Kubectl, append(toolArgs, args...) - } - - // Append the args to find the config and keyring - keyringFile := fmt.Sprintf("%s.keyring", clusterInfo.CephCred.Username) - configArgs := []string{ - fmt.Sprintf("--cluster=%s", clusterInfo.Namespace), - fmt.Sprintf("--conf=%s", CephConfFilePath(configDir, clusterInfo.Namespace)), - fmt.Sprintf("--name=%s", clusterInfo.CephCred.Username), - fmt.Sprintf("--keyring=%s", path.Join(configDir, clusterInfo.Namespace, keyringFile)), - } - return command, append(args, configArgs...) -} - -type CephToolCommand struct { - context *clusterd.Context - clusterInfo *ClusterInfo - tool string - args []string - timeout time.Duration - JsonOutput bool - RemoteExecution bool -} - -func newCephToolCommand(tool string, context *clusterd.Context, clusterInfo *ClusterInfo, args []string) *CephToolCommand { - return &CephToolCommand{ - context: context, - tool: tool, - clusterInfo: clusterInfo, - args: args, - JsonOutput: true, - } -} - -func NewCephCommand(context *clusterd.Context, clusterInfo *ClusterInfo, args []string) *CephToolCommand { - return newCephToolCommand(CephTool, context, clusterInfo, args) -} - -func NewRBDCommand(context *clusterd.Context, clusterInfo *ClusterInfo, args []string) *CephToolCommand { - cmd := newCephToolCommand(RBDTool, context, clusterInfo, args) - cmd.JsonOutput = false - - // When Multus is enabled, the RBD tool should run inside the proxy container - if clusterInfo.NetworkSpec.IsMultus() { - cmd.RemoteExecution = true - } - - return cmd -} - -func (c *CephToolCommand) run() ([]byte, error) { - command, args := FinalizeCephCommandArgs(c.tool, c.clusterInfo, c.args, c.context.ConfigDir) - if c.JsonOutput { - args = append(args, "--format", "json") - } else { - // the `rbd` tool doesn't use special flag for plain format - if c.tool != RBDTool { - args = append(args, "--format", "plain") - } - } - - var output, stderr string - var err error - - // NewRBDCommand does not use the --out-file option so we only check for remote execution here - // Still forcing the check for the command if the behavior changes in the future - if command == RBDTool { - if c.RemoteExecution { - output, stderr, err = c.context.RemoteExecutor.ExecCommandInContainerWithFullOutputWithTimeout(ProxyAppLabel, CommandProxyInitContainerName, c.clusterInfo.Namespace, append([]string{command}, args...)...) - output = fmt.Sprintf("%s.%s", output, stderr) - } else if c.timeout == 0 { - output, err = c.context.Executor.ExecuteCommandWithOutput(command, args...) - } else { - output, err = c.context.Executor.ExecuteCommandWithTimeout(c.timeout, command, args...) - } - } else if c.timeout == 0 { - output, err = c.context.Executor.ExecuteCommandWithOutput(command, args...) - } else { - output, err = c.context.Executor.ExecuteCommandWithTimeout(c.timeout, command, args...) 
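
On the comment above about using a slice for the connect-timeout exclusions in FinalizeCephCommandArgs: a map-keyed set keeps the check to a single lookup. A small sketch of that alternative, with the tool names taken from the function above; this is an illustration, not the code being removed.

package main

import (
	"fmt"
	"strconv"
)

// toolsWithoutConnectTimeout lists the CLIs that, per the function above, do
// not accept --connect-timeout.
var toolsWithoutConnectTimeout = map[string]bool{
	"rbd":           true,
	"crushtool":     true,
	"radosgw-admin": true,
}

// appendTimeout mirrors the gating logic: only tools that understand the flag get it.
func appendTimeout(tool string, args []string, timeoutSeconds int) []string {
	if !toolsWithoutConnectTimeout[tool] {
		args = append(args, "--connect-timeout="+strconv.Itoa(timeoutSeconds))
	}
	return args
}

func main() {
	fmt.Println(appendTimeout("ceph", []string{"status"}, 15))      // [status --connect-timeout=15]
	fmt.Println(appendTimeout("crushtool", []string{"--help"}, 15)) // [--help]
}
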
- } - - return []byte(output), err -} - -func (c *CephToolCommand) Run() ([]byte, error) { - c.timeout = 0 - return c.run() -} - -func (c *CephToolCommand) RunWithTimeout(timeout time.Duration) ([]byte, error) { - c.timeout = timeout - return c.run() -} - -// ExecuteRBDCommandWithTimeout executes the 'rbd' command with a timeout of 1 -// minute. This method is left as a special case in which the caller has fully -// configured its arguments. It is future work to integrate this case into the -// generalization. -func ExecuteRBDCommandWithTimeout(context *clusterd.Context, args []string) (string, error) { - output, err := context.Executor.ExecuteCommandWithTimeout(exec.CephCommandsTimeout, RBDTool, args...) - return output, err -} - -func ExecuteCephCommandWithRetry( - cmd func() (string, []byte, error), - getExitCode func(err error) (int, bool), - retries int, - retryOnExitCode int, - waitTime time.Duration, -) ([]byte, error) { - for i := 0; i < retries; i++ { - action, data, err := cmd() - if err != nil { - exitCode, parsed := getExitCode(err) - if parsed { - if exitCode == retryOnExitCode { - logger.Infof("command failed for %s. trying again...", action) - time.Sleep(waitTime) - continue - } - } - return nil, errors.Wrapf(err, "failed to complete command for %s", action) - } - if i > 0 { - logger.Infof("action %s succeeded on attempt %d", action, i) - } - return data, nil - } - return nil, errors.New("max command retries exceeded") -} diff --git a/pkg/daemon/ceph/client/command_test.go b/pkg/daemon/ceph/client/command_test.go deleted file mode 100644 index aed9bb3ca..000000000 --- a/pkg/daemon/ceph/client/command_test.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "strconv" - "testing" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/test" - "github.com/rook/rook/pkg/util/exec" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestFinalizeCephCommandArgs(t *testing.T) { - RunAllCephCommandsInToolboxPod = "" - configDir := "/var/lib/rook/rook-ceph" - expectedCommand := "ceph" - args := []string{"quorum_status"} - expectedArgs := []string{ - "quorum_status", - "--connect-timeout=" + strconv.Itoa(int(exec.CephCommandsTimeout.Seconds())), - "--cluster=rook", - "--conf=/var/lib/rook/rook-ceph/rook/rook.config", - "--name=client.admin", - "--keyring=/var/lib/rook/rook-ceph/rook/client.admin.keyring", - } - - clusterInfo := AdminClusterInfo("rook") - cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir) - assert.Exactly(t, expectedCommand, cmd) - assert.Exactly(t, expectedArgs, args) -} - -func TestFinalizeRadosGWAdminCommandArgs(t *testing.T) { - RunAllCephCommandsInToolboxPod = "" - configDir := "/var/lib/rook/rook-ceph" - expectedCommand := "radosgw-admin" - args := []string{ - "realm", - "create", - "--default", - "--rgw-realm=default-rook", - "--rgw-zonegroup=default-rook", - } - - expectedArgs := []string{ - "realm", - "create", - "--default", - "--rgw-realm=default-rook", - "--rgw-zonegroup=default-rook", - "--cluster=rook", - "--conf=/var/lib/rook/rook-ceph/rook/rook.config", - "--name=client.admin", - "--keyring=/var/lib/rook/rook-ceph/rook/client.admin.keyring", - } - - clusterInfo := AdminClusterInfo("rook") - cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir) - assert.Exactly(t, expectedCommand, cmd) - assert.Exactly(t, expectedArgs, args) -} - -func TestFinalizeCephCommandArgsToolBox(t *testing.T) { - RunAllCephCommandsInToolboxPod = "rook-ceph-tools" - configDir := "/var/lib/rook/rook-ceph" - expectedCommand := "ceph" - args := []string{"health"} - expectedArgs := []string{ - "exec", - "-i", - "rook-ceph-tools", - "-n", - "rook", - "--", - "timeout", - "15", - "ceph", - "health", - "--connect-timeout=15", - } - - clusterInfo := AdminClusterInfo("rook") - exec.CephCommandsTimeout = 15 * time.Second - cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir) - assert.Exactly(t, "kubectl", cmd) - assert.Exactly(t, expectedArgs, args) - RunAllCephCommandsInToolboxPod = "" -} - -func TestNewRBDCommand(t *testing.T) { - args := []string{"create", "--size", "1G", "myvol"} - - t.Run("rbd command with no multus", func(t *testing.T) { - clusterInfo := AdminClusterInfo("rook") - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - switch { - case command == "rbd" && args[0] == "create": - return "success", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - context := &clusterd.Context{Executor: executor} - cmd := NewRBDCommand(context, clusterInfo, args) - assert.False(t, cmd.RemoteExecution) - output, err := cmd.Run() - assert.NoError(t, err) - assert.Equal(t, "success", string(output)) - - }) - t.Run("rbd command with multus", func(t *testing.T) { - clusterInfo := AdminClusterInfo("rook") - clusterInfo.NetworkSpec.Provider = "multus" - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor, RemoteExecutor: exec.RemotePodCommandExecutor{ClientSet: test.New(t, 3)}} - cmd := 
NewRBDCommand(context, clusterInfo, args) - assert.True(t, cmd.RemoteExecution) - _, err := cmd.Run() - assert.Error(t, err) - // This is not the best but it shows we go through the right codepath - assert.EqualError(t, err, "no pods found with selector \"rook-ceph-mgr\"") - }) - -} diff --git a/pkg/daemon/ceph/client/config.go b/pkg/daemon/ceph/client/config.go deleted file mode 100644 index ccc752d7d..000000000 --- a/pkg/daemon/ceph/client/config.go +++ /dev/null @@ -1,310 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package client provides methods for creating and formatting Ceph configuration files for daemons. -package client - -import ( - "context" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "path/filepath" - "strconv" - "strings" - - "github.com/rook/rook/pkg/operator/k8sutil" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/coreos/pkg/capnslog" - "github.com/go-ini/ini" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - cephutil "github.com/rook/rook/pkg/daemon/ceph/util" - cephver "github.com/rook/rook/pkg/operator/ceph/version" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephclient") - -const ( - // DefaultKeyringFile is the default name of the file where Ceph stores its keyring info - DefaultKeyringFile = "keyring" - // Msgr2port is the listening port of the messenger v2 protocol - Msgr2port = 3300 -) - -var ( - // DefaultConfigDir is the default dir where Ceph stores its configs. Can be overridden for unit - // tests. - DefaultConfigDir = "/etc/ceph" - - // DefaultConfigFile is the default name of the file where Ceph stores its configs. Can be - // overridden for unit tests. - DefaultConfigFile = "ceph.conf" -) - -// GlobalConfig represents the [global] sections of Ceph's config file. -type GlobalConfig struct { - FSID string `ini:"fsid,omitempty"` - MonMembers string `ini:"mon initial members,omitempty"` - MonHost string `ini:"mon host"` - PublicAddr string `ini:"public addr,omitempty"` - PublicNetwork string `ini:"public network,omitempty"` - ClusterAddr string `ini:"cluster addr,omitempty"` - ClusterNetwork string `ini:"cluster network,omitempty"` -} - -// CephConfig represents an entire Ceph config including all sections. -type CephConfig struct { - *GlobalConfig `ini:"global,omitempty"` -} - -// DefaultConfigFilePath returns the full path to Ceph's default config file -func DefaultConfigFilePath() string { - return path.Join(DefaultConfigDir, DefaultConfigFile) -} - -// getConfFilePath gets the path of a given cluster's config file -func getConfFilePath(root, clusterName string) string { - return fmt.Sprintf("%s/%s.config", root, clusterName) -} - -// GenerateConnectionConfig calls GenerateConnectionConfigWithSettings with no settings -// overridden. 
-func GenerateConnectionConfig(context *clusterd.Context, cluster *ClusterInfo) (string, error) { - return GenerateConnectionConfigWithSettings(context, cluster, nil) -} - -// GenerateConnectionConfigWithSettings generates a Ceph config and keyring which will allow -// the daemon to connect. Default config file settings can be overridden by specifying -// some subset of settings. -func GenerateConnectionConfigWithSettings(context *clusterd.Context, clusterInfo *ClusterInfo, settings *CephConfig) (string, error) { - root := path.Join(context.ConfigDir, clusterInfo.Namespace) - keyringPath := path.Join(root, fmt.Sprintf("%s.keyring", clusterInfo.CephCred.Username)) - err := writeKeyring(CephKeyring(clusterInfo.CephCred), keyringPath) - if err != nil { - return "", errors.Wrapf(err, "failed to write keyring %q to %s", clusterInfo.CephCred.Username, root) - } - - filePath, err := generateConfigFile(context, clusterInfo, root, keyringPath, settings, nil) - if err != nil { - return "", errors.Wrapf(err, "failed to write config to %s", root) - } - logger.Infof("generated admin config in %s", root) - return filePath, nil -} - -// generateConfigFile generates and writes a config file to disk. -func generateConfigFile(context *clusterd.Context, clusterInfo *ClusterInfo, pathRoot, keyringPath string, globalConfig *CephConfig, clientSettings map[string]string) (string, error) { - - // create the config directory - if err := os.MkdirAll(pathRoot, 0744); err != nil { - logger.Warningf("failed to create config directory at %q. %v", pathRoot, err) - } - - configFile, err := createGlobalConfigFileSection(context, clusterInfo, globalConfig) - if err != nil { - return "", errors.Wrap(err, "failed to create global config section") - } - - if err := mergeDefaultConfigWithRookConfigOverride(context, clusterInfo, configFile); err != nil { - return "", errors.Wrapf(err, "failed to merge global config with %q", k8sutil.ConfigOverrideName) - } - - qualifiedUser := getQualifiedUser(clusterInfo.CephCred.Username) - if err := addClientConfigFileSection(configFile, qualifiedUser, keyringPath, clientSettings); err != nil { - return "", errors.Wrap(err, "failed to add admin client config section") - } - - // write the entire config to disk - filePath := getConfFilePath(pathRoot, clusterInfo.Namespace) - logger.Infof("writing config file %s", filePath) - if err := configFile.SaveTo(filePath); err != nil { - return "", errors.Wrapf(err, "failed to save config file %s", filePath) - } - - return filePath, nil -} - -func mergeDefaultConfigWithRookConfigOverride(clusterdContext *clusterd.Context, clusterInfo *ClusterInfo, configFile *ini.File) error { - ctx := context.TODO() - cm, err := clusterdContext.Clientset.CoreV1().ConfigMaps(clusterInfo.Namespace).Get(ctx, k8sutil.ConfigOverrideName, metav1.GetOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to read configmap %q", k8sutil.ConfigOverrideName) - } - return nil - } - - config, ok := cm.Data["config"] - if !ok || config == "" { - logger.Debugf("No ceph configuration override to merge as %q configmap is empty", k8sutil.ConfigOverrideName) - return nil - } - - if err := configFile.Append([]byte(config)); err != nil { - return errors.Wrapf(err, "failed to load config data from %q", k8sutil.ConfigOverrideName) - } - - // Remove any debug message setting from the config file - // Debug messages will be printed on stdout, rendering the output of each command unreadable, especially json output - // This call is idempotent and will 
not fail if the debug message is not present - configFile.Section("global").DeleteKey("debug_ms") - configFile.Section("global").DeleteKey("debug ms") - - return nil -} - -// prepends "client." if a user namespace is not already specified -func getQualifiedUser(user string) string { - if !strings.Contains(user, ".") { - return fmt.Sprintf("client.%s", user) - } - - return user -} - -// CreateDefaultCephConfig creates a default ceph config file. -func CreateDefaultCephConfig(context *clusterd.Context, clusterInfo *ClusterInfo) (*CephConfig, error) { - - cephVersionEnv := os.Getenv("ROOK_CEPH_VERSION") - if cephVersionEnv != "" { - v, err := cephver.ExtractCephVersion(cephVersionEnv) - if err != nil { - return nil, errors.Wrap(err, "failed to extract ceph version") - } - clusterInfo.CephVersion = *v - } - - // extract a list of just the monitor names, which will populate the "mon initial members" - // and "mon hosts" global config field - monMembers, monHosts := PopulateMonHostMembers(clusterInfo.Monitors) - - conf := &CephConfig{ - GlobalConfig: &GlobalConfig{ - FSID: clusterInfo.FSID, - MonMembers: strings.Join(monMembers, " "), - MonHost: strings.Join(monHosts, ","), - PublicAddr: context.NetworkInfo.PublicAddr, - PublicNetwork: context.NetworkInfo.PublicNetwork, - ClusterAddr: context.NetworkInfo.ClusterAddr, - ClusterNetwork: context.NetworkInfo.ClusterNetwork, - }, - } - - return conf, nil -} - -// create a config file with global settings configured, and return an ini file -func createGlobalConfigFileSection(context *clusterd.Context, clusterInfo *ClusterInfo, userConfig *CephConfig) (*ini.File, error) { - - var ceph *CephConfig - - if userConfig != nil { - // use the user config since it was provided - ceph = userConfig - } else { - var err error - ceph, err = CreateDefaultCephConfig(context, clusterInfo) - if err != nil { - return nil, errors.Wrap(err, "failed to create default ceph config") - } - } - - configFile := ini.Empty() - err := ini.ReflectFrom(configFile, ceph) - return configFile, err -} - -// add client config to the ini file -func addClientConfigFileSection(configFile *ini.File, clientName, keyringPath string, settings map[string]string) error { - s, err := configFile.NewSection(clientName) - if err != nil { - return err - } - - if _, err := s.NewKey("keyring", keyringPath); err != nil { - return err - } - - for key, val := range settings { - if _, err := s.NewKey(key, val); err != nil { - return errors.Wrapf(err, "failed to add key %s", key) - } - } - - return nil -} - -// PopulateMonHostMembers extracts a list of just the monitor names, which will populate the "mon initial members" -// and "mon hosts" global config field -func PopulateMonHostMembers(monitors map[string]*MonInfo) ([]string, []string) { - monMembers := make([]string, len(monitors)) - monHosts := make([]string, len(monitors)) - - i := 0 - for _, monitor := range monitors { - monMembers[i] = monitor.Name - monIP := cephutil.GetIPFromEndpoint(monitor.Endpoint) - - // This tries to detect the current port if the mon already exists - // This basically handles the transition between monitors running on 6790 to msgr2 - // So whatever the previous monitor port was we keep it - currentMonPort := cephutil.GetPortFromEndpoint(monitor.Endpoint) - - monPorts := [2]string{strconv.Itoa(int(Msgr2port)), strconv.Itoa(int(currentMonPort))} - msgr2Endpoint := net.JoinHostPort(monIP, monPorts[0]) - msgr1Endpoint := net.JoinHostPort(monIP, monPorts[1]) - - monHosts[i] = "[v2:" + msgr2Endpoint + ",v1:" + msgr1Endpoint + "]" 
- i++ - } - - return monMembers, monHosts -} - -// WriteCephConfig writes the ceph config so ceph commands can be executed -func WriteCephConfig(context *clusterd.Context, clusterInfo *ClusterInfo) error { - // create the ceph.conf with the default settings - cephConfig, err := CreateDefaultCephConfig(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to create default ceph config") - } - - // write the latest config to the config dir - confFilePath, err := GenerateConnectionConfigWithSettings(context, clusterInfo, cephConfig) - if err != nil { - return errors.Wrap(err, "failed to write connection config") - } - src, err := ioutil.ReadFile(filepath.Clean(confFilePath)) - if err != nil { - return errors.Wrap(err, "failed to copy connection config to /etc/ceph. failed to read the connection config") - } - err = ioutil.WriteFile(DefaultConfigFilePath(), src, 0444) - if err != nil { - return errors.Wrapf(err, "failed to copy connection config to /etc/ceph. failed to write %q", DefaultConfigFilePath()) - } - dst, err := ioutil.ReadFile(DefaultConfigFilePath()) - if err == nil { - logger.Debugf("config file @ %s: %s", DefaultConfigFilePath(), dst) - } else { - logger.Warningf("wrote and copied config file but failed to read it back from %s for logging. %v", DefaultConfigFilePath(), err) - } - return nil -} diff --git a/pkg/daemon/ceph/client/config_test.go b/pkg/daemon/ceph/client/config_test.go deleted file mode 100644 index 2f28463bb..000000000 --- a/pkg/daemon/ceph/client/config_test.go +++ /dev/null @@ -1,186 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/coreos/pkg/capnslog" - "github.com/go-ini/ini" - "github.com/rook/rook/pkg/clusterd" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/stretchr/testify/assert" -) - -func TestCreateDefaultCephConfig(t *testing.T) { - clusterInfo := &ClusterInfo{ - FSID: "id", - MonitorSecret: "monsecret", - Namespace: "foo-cluster", - Monitors: map[string]*MonInfo{ - "node0": {Name: "mon0", Endpoint: "10.0.0.1:6789"}, - "node1": {Name: "mon1", Endpoint: "10.0.0.2:6789"}, - }, - CephVersion: cephver.Nautilus, - } - - // start with INFO level logging - context := &clusterd.Context{ - LogLevel: capnslog.INFO, - NetworkInfo: clusterd.NetworkInfo{ - PublicAddr: "10.1.1.1", - PublicNetwork: "10.1.1.0/24", - ClusterAddr: "10.1.2.2", - ClusterNetwork: "10.1.2.0/24", - }, - } - - cephConfig, err := CreateDefaultCephConfig(context, clusterInfo) - if err != nil { - t.Fatalf("failed to create default ceph config. 
%+v", err) - } - verifyConfig(t, cephConfig, clusterInfo, 0) - - // now use DEBUG level logging - context.LogLevel = capnslog.DEBUG - - cephConfig, err = CreateDefaultCephConfig(context, clusterInfo) - if err != nil { - t.Fatalf("failed to create default ceph config. %+v", err) - } - verifyConfig(t, cephConfig, clusterInfo, 10) - - // verify the network info config - assert.Equal(t, "10.1.1.1", cephConfig.PublicAddr) - assert.Equal(t, "10.1.1.0/24", cephConfig.PublicNetwork) - assert.Equal(t, "10.1.2.2", cephConfig.ClusterAddr) - assert.Equal(t, "10.1.2.0/24", cephConfig.ClusterNetwork) -} - -func TestGenerateConfigFile(t *testing.T) { - ctx := context.TODO() - // set up a temporary config directory that will be cleaned up after test - configDir, err := ioutil.TempDir("", "TestGenerateConfigFile") - if err != nil { - t.Fatalf("failed to create temp config dir: %+v", err) - } - defer os.RemoveAll(configDir) - - // create mocked cluster context and info - clientset := test.New(t, 3) - - context := &clusterd.Context{ - ConfigDir: configDir, - Clientset: clientset, - } - - ns := "foo-cluster" - data := make(map[string]string, 1) - data["config"] = "[global]\n bluestore_min_alloc_size_hdd = 4096" - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: k8sutil.ConfigOverrideName, - Namespace: ns, - }, - Data: data, - } - _, err = clientset.CoreV1().ConfigMaps(ns).Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - - clusterInfo := &ClusterInfo{ - FSID: "myfsid", - MonitorSecret: "monsecret", - Namespace: ns, - Monitors: map[string]*MonInfo{ - "node0": {Name: "mon0", Endpoint: "10.0.0.1:6789"}, - }, - CephVersion: cephver.Nautilus, - CephCred: CephCred{Username: "admin", Secret: "mysecret"}, - } - - isInitialized := clusterInfo.IsInitialized(true) - assert.True(t, isInitialized) - - // generate the config file to disk now - configFilePath, err := generateConfigFile(context, clusterInfo, configDir, filepath.Join(configDir, "mykeyring"), nil, nil) - assert.Nil(t, err) - assert.Equal(t, filepath.Join(configDir, "foo-cluster.config"), configFilePath) - - // verify some of the contents of written config file by loading it from disk - actualConf, err := ini.Load(configFilePath) - assert.Nil(t, err) - verifyConfigValue(t, actualConf, "global", "fsid", clusterInfo.FSID) - verifyConfigValue(t, actualConf, "global", "bluestore_min_alloc_size_hdd", "4096") -} - -func verifyConfig(t *testing.T, cephConfig *CephConfig, cluster *ClusterInfo, loggingLevel int) { - monMembers := make([]string, len(cluster.Monitors)) - i := 0 - for _, expectedMon := range cluster.Monitors { - contained := false - monMembers[i] = expectedMon.Name - for _, actualMon := range strings.Split(cephConfig.MonMembers, " ") { - if expectedMon.Name == actualMon { - contained = true - break - } - } - - assert.True(t, contained) - } - - // Testing mon_host - - expectedMons := "[v2:10.0.0.1:3300,v1:10.0.0.1:6789],[v2:10.0.0.2:3300,v1:10.0.0.2:6789]" - - for _, expectedMon := range strings.Split(expectedMons, ",") { - contained := false - for _, actualMon := range strings.Split(cephConfig.MonHost, ",") { - if expectedMon == actualMon { - contained = true - break - } - } - - assert.True(t, contained, "expectedMons: %+v, actualMons: %+v", expectedMons, cephConfig.MonHost) - } -} - -func verifyConfigValue(t *testing.T, actualConf *ini.File, section, key, expectedVal string) { - s, err := actualConf.GetSection(section) - if !assert.Nil(t, err) { - return - } - - k := s.Key(key) - if !assert.NotNil(t, k) { - return - } - - 
actualVal := k.Value() - assert.Equal(t, expectedVal, actualVal) -} diff --git a/pkg/daemon/ceph/client/crash.go b/pkg/daemon/ceph/client/crash.go deleted file mode 100644 index 8f8d478a9..000000000 --- a/pkg/daemon/ceph/client/crash.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "encoding/json" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" -) - -// CrashList is go representation of the "ceph crash ls" command output -type CrashList struct { - ID string `json:"crash_id"` - Entity string `json:"entity_name"` - Timestamp string `json:"timestamp"` - ProcessName string `json:"process_name,omitempty"` - CephVersion string `json:"ceph_version,omitempty"` - UtsnameHostname string `json:"utsname_hostname,omitempty"` - UtsnameSysname string `json:"utsname_sysname,omitempty"` - UtsnameRelease string `json:"utsname_release,omitempty"` - UtsnameVersion string `json:"utsname_version,omitempty"` - UtsnameMachine string `json:"utsname_machine,omitempty"` - OsName string `json:"os_name,omitempty"` - OsID string `json:"os_id,omitempty"` - OsVersionID string `json:"os_version_id,omitempty"` - OsVersion string `json:"os_version,omitempty"` - AssertCondition string `json:"assert_condition,omitempty"` - AssertFunc string `json:"assert_func,omitempty"` - AssertLine int `json:"assert_line,omitempty"` - AssertFile string `json:"assert_file,omitempty"` - AssertThreadName string `json:"assert_thread_name,omitempty"` - AssertMsg string `json:"assert_msg,omitempty"` - IoError bool `json:"io_error,omitempty"` - IoErrorDevname string `json:"io_error_devname,omitempty"` - IoErrorPath string `json:"io_error_path,omitempty"` - IoErrorCode int `json:"io_error_code,omitempty"` - IoErrorOptype int `json:"io_error_optype,omitempty"` - IoErrorOffset int `json:"io_error_offset,omitempty"` - IoErrorLength int `json:"iio_error_length,omitempty"` - Backtrace []string `json:"backtrace,omitempty"` -} - -// GetCrashList gets the list of Crashes. -func GetCrashList(context *clusterd.Context, clusterInfo *ClusterInfo) ([]CrashList, error) { - crashargs := []string{"crash", "ls"} - output, err := NewCephCommand(context, clusterInfo, crashargs).Run() - if err != nil { - return nil, errors.Wrap(err, "failed to list ceph crash") - } - var crash []CrashList - err = json.Unmarshal(output, &crash) - if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal crash ls response. 
%s", string(output)) - } - return crash, err -} - -// ArchiveCrash archives the crash with respective crashID -func ArchiveCrash(context *clusterd.Context, clusterInfo *ClusterInfo, crashID string) error { - crashsilenceargs := []string{"crash", "archive", crashID} - _, err := NewCephCommand(context, clusterInfo, crashsilenceargs).Run() - if err != nil { - return errors.Wrapf(err, "failed to archive crash %q", crashID) - } - return nil -} - -// GetCrash gets the crash list -func GetCrash(context *clusterd.Context, clusterInfo *ClusterInfo) ([]CrashList, error) { - crash, err := GetCrashList(context, clusterInfo) - if err != nil { - return nil, errors.Wrap(err, "failed to list ceph crash") - } - return crash, nil -} diff --git a/pkg/daemon/ceph/client/crash_test.go b/pkg/daemon/ceph/client/crash_test.go deleted file mode 100644 index ccd40634a..000000000 --- a/pkg/daemon/ceph/client/crash_test.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -var ( - fakecrash = `[ - { - "crash_id": "2020-11-09_13:58:08.230130Z_ca918f58-c078-444d-a91a-bd972c14c155", - "timestamp": "2020-11-09 13:58:08.230130Z", - "process_name": "ceph-osd", - "entity_name": "osd.0" - } - ]` -) - -func TestCephCrash(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("ExecuteCommandWithOutputFile: %s %v", command, args) - if args[0] == "crash" && args[1] == "ls" { - return fakecrash, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - crash, err := GetCrashList(context, AdminClusterInfo("mycluster")) - assert.NoError(t, err) - assert.Equal(t, 1, len(crash)) -} diff --git a/pkg/daemon/ceph/client/crush.go b/pkg/daemon/ceph/client/crush.go deleted file mode 100644 index e3ce35633..000000000 --- a/pkg/daemon/ceph/client/crush.go +++ /dev/null @@ -1,263 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "strconv" - "strings" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" -) - -const ( - CrushRootConfigKey = "crushRoot" -) - -// CrushMap is the go representation of a CRUSH map -type CrushMap struct { - Devices []struct { - ID int `json:"id"` - Name string `json:"name"` - Class string `json:"class"` - } `json:"devices"` - Types []struct { - ID int `json:"type_id"` - Name string `json:"name"` - } `json:"types"` - Buckets []struct { - ID int `json:"id"` - Name string `json:"name"` - TypeID int `json:"type_id"` - TypeName string `json:"type_name"` - Weight int `json:"weight"` - Alg string `json:"alg"` - Hash string `json:"hash"` - Items []struct { - ID int `json:"id"` - Weight int `json:"weight"` - Pos int `json:"pos"` - } `json:"items"` - } `json:"buckets"` - Rules []ruleSpec `json:"rules"` - Tunables struct { - // Add if necessary - } `json:"tunables"` -} - -type ruleSpec struct { - ID int `json:"rule_id"` - Name string `json:"rule_name"` - Ruleset int `json:"ruleset"` - Type int `json:"type"` - MinSize int `json:"min_size"` - MaxSize int `json:"max_size"` - Steps []stepSpec `json:"steps"` -} - -type stepSpec struct { - Operation string `json:"op"` - Number uint `json:"num"` - Item int `json:"item"` - ItemName string `json:"item_name"` - Type string `json:"type"` -} - -// CrushFindResult is go representation of the Ceph osd find command output -type CrushFindResult struct { - ID int `json:"osd"` - IP string `json:"ip"` - Host string `json:"host,omitempty"` - Location map[string]string `json:"crush_location"` -} - -// GetCrushMap fetches the Ceph CRUSH map -func GetCrushMap(context *clusterd.Context, clusterInfo *ClusterInfo) (CrushMap, error) { - var c CrushMap - args := []string{"osd", "crush", "dump"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return c, errors.Wrapf(err, "failed to get crush map. %s", string(buf)) - } - - err = json.Unmarshal(buf, &c) - if err != nil { - return c, errors.Wrap(err, "failed to unmarshal crush map") - } - - return c, nil -} - -// GetCompiledCrushMap fetches the Ceph compiled version of the CRUSH map -func GetCompiledCrushMap(context *clusterd.Context, clusterInfo *ClusterInfo) (string, error) { - compiledCrushMapFile, err := ioutil.TempFile("", "") - if err != nil { - return "", errors.Wrap(err, "failed to generate temporarily file") - } - - args := []string{"osd", "getcrushmap", "--out-file", compiledCrushMapFile.Name()} - exec := NewCephCommand(context, clusterInfo, args) - exec.JsonOutput = false - buf, err := exec.Run() - if err != nil { - return "", errors.Wrapf(err, "failed to get compiled crush map. 
%s", string(buf)) - } - - return compiledCrushMapFile.Name(), nil -} - -// FindOSDInCrushMap finds an OSD in the CRUSH map -func FindOSDInCrushMap(context *clusterd.Context, clusterInfo *ClusterInfo, osdID int) (*CrushFindResult, error) { - args := []string{"osd", "find", strconv.Itoa(osdID)} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to find osd.%d in crush map: %s", osdID, string(buf)) - } - - var result CrushFindResult - if err := json.Unmarshal(buf, &result); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal crush find result: %s", string(buf)) - } - - return &result, nil -} - -// GetCrushHostName gets the hostname where an OSD is running on -func GetCrushHostName(context *clusterd.Context, clusterInfo *ClusterInfo, osdID int) (string, error) { - result, err := FindOSDInCrushMap(context, clusterInfo, osdID) - if err != nil { - return "", err - } - - return result.Location["host"], nil -} - -// NormalizeCrushName replaces . with - -func NormalizeCrushName(name string) string { - return strings.Replace(name, ".", "-", -1) -} - -// Obtain the cluster-wide default crush root from the cluster spec -func GetCrushRootFromSpec(c *cephv1.ClusterSpec) string { - if c.Storage.Config == nil { - return cephv1.DefaultCRUSHRoot - } - if v, ok := c.Storage.Config[CrushRootConfigKey]; ok { - return v - } - return cephv1.DefaultCRUSHRoot -} - -// IsNormalizedCrushNameEqual returns true if normalized is either equal to or the normalized version of notNormalized -// a crush name is normalized if it comes from the crushmap or has passed through the NormalizeCrushName function. -func IsNormalizedCrushNameEqual(notNormalized, normalized string) bool { - if notNormalized == normalized || NormalizeCrushName(notNormalized) == normalized { - return true - } - return false -} - -// UpdateCrushMapValue is for updating the location in the crush map -// this is not safe for incorrectly formatted strings -func UpdateCrushMapValue(pairs *[]string, key, value string) { - found := false - property := formatProperty(key, value) - for i, pair := range *pairs { - entry := strings.Split(pair, "=") - if key == entry[0] { - (*pairs)[i] = property - found = true - } - } - if !found { - *pairs = append(*pairs, property) - } -} - -func formatProperty(name, value string) string { - return fmt.Sprintf("%s=%s", name, value) -} - -// GetOSDOnHost returns the list of osds running on a given host -func GetOSDOnHost(context *clusterd.Context, clusterInfo *ClusterInfo, node string) (string, error) { - node = NormalizeCrushName(node) - args := []string{"osd", "crush", "ls", node} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return "", errors.Wrapf(err, "failed to get osd list on host. %s", string(buf)) - } - - return string(buf), nil -} - -func compileCRUSHMap(context *clusterd.Context, crushMapPath string) error { - mapFile := buildCompileCRUSHFileName(crushMapPath) - args := []string{"--compile", crushMapPath, "--outfn", mapFile} - output, err := context.Executor.ExecuteCommandWithOutput("crushtool", args...) - if err != nil { - return errors.Wrapf(err, "failed to compile crush map %q. 
%s", mapFile, output) - } - - return nil -} - -func decompileCRUSHMap(context *clusterd.Context, crushMapPath string) error { - mapFile := buildDecompileCRUSHFileName(crushMapPath) - args := []string{"--decompile", crushMapPath, "--outfn", mapFile} - output, err := context.Executor.ExecuteCommandWithOutput("crushtool", args...) - if err != nil { - return errors.Wrapf(err, "failed to decompile crush map %q. %s", mapFile, output) - } - - return nil -} - -func injectCRUSHMap(context *clusterd.Context, clusterInfo *ClusterInfo, crushMapPath string) error { - args := []string{"osd", "setcrushmap", "--in-file", crushMapPath} - exec := NewCephCommand(context, clusterInfo, args) - exec.JsonOutput = false - buf, err := exec.Run() - if err != nil { - return errors.Wrapf(err, "failed to inject crush map %q. %s", crushMapPath, string(buf)) - } - - return nil -} - -func setCRUSHMap(context *clusterd.Context, clusterInfo *ClusterInfo, crushMapPath string) error { - args := []string{"osd", "crush", "set", crushMapPath} - exec := NewCephCommand(context, clusterInfo, args) - exec.JsonOutput = false - buf, err := exec.Run() - if err != nil { - return errors.Wrapf(err, "failed to set crush map %q. %s", crushMapPath, string(buf)) - } - - return nil -} - -func buildDecompileCRUSHFileName(crushMapPath string) string { - return fmt.Sprintf("%s.decompiled", crushMapPath) -} - -func buildCompileCRUSHFileName(crushMapPath string) string { - return fmt.Sprintf("%s.compiled", crushMapPath) -} diff --git a/pkg/daemon/ceph/client/crush_rule.go b/pkg/daemon/ceph/client/crush_rule.go deleted file mode 100644 index e479af3c0..000000000 --- a/pkg/daemon/ceph/client/crush_rule.go +++ /dev/null @@ -1,182 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "fmt" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" -) - -const ( - crushReplicatedType = 1 - ruleMinSizeDefault = 1 - ruleMaxSizeDefault = 10 - twoStepCRUSHRuleTemplate = ` -rule %s { - id %d - type replicated - min_size %d - max_size %d - step take %s %s - step choose firstn 0 type %s - step chooseleaf firstn 2 type %s - step emit -} -` - twoStepHybridCRUSHRuleTemplate = ` -rule %s { - id %d - type replicated - min_size %d - max_size %d - step take %s class %s - step chooseleaf firstn 1 type %s - step emit - step take %s class %s - step chooseleaf firstn 0 type %s - step emit -} -` -) - -var ( - stepEmit = &stepSpec{Operation: "emit"} -) - -func buildTwoStepPlainCrushRule(crushMap CrushMap, ruleName string, pool cephv1.PoolSpec) string { - var crushRuleInsert string - if pool.DeviceClass != "" { - crushRuleInsert = fmt.Sprintf("class %s", pool.DeviceClass) - } - return fmt.Sprintf( - twoStepCRUSHRuleTemplate, - ruleName, - generateRuleID(crushMap.Rules), - ruleMinSizeDefault, - ruleMaxSizeDefault, - pool.CrushRoot, - crushRuleInsert, - pool.FailureDomain, - pool.Replicated.SubFailureDomain, - ) -} - -func buildTwoStepHybridCrushRule(crushMap CrushMap, ruleName string, pool cephv1.PoolSpec) string { - primaryOSDDeviceClass := pool.Replicated.HybridStorage.PrimaryDeviceClass - secondaryOSDsDeviceClass := pool.Replicated.HybridStorage.SecondaryDeviceClass - - return fmt.Sprintf( - twoStepHybridCRUSHRuleTemplate, - ruleName, - generateRuleID(crushMap.Rules), - ruleMinSizeDefault, - ruleMaxSizeDefault, - pool.CrushRoot, - primaryOSDDeviceClass, - pool.FailureDomain, - pool.CrushRoot, - secondaryOSDsDeviceClass, - pool.FailureDomain, - ) -} - -func buildTwoStepCrushRule(crushMap CrushMap, ruleName string, pool cephv1.PoolSpec) *ruleSpec { - /* - The complete CRUSH rule looks like this: - - rule two_rep_per_dc { - id 1 - type replicated - min_size 1 - max_size 10 - step take root - step choose firstn 0 type datacenter - step chooseleaf firstn 2 type host - step emit - } - - */ - - ruleID := generateRuleID(crushMap.Rules) - return &ruleSpec{ - ID: ruleID, - Name: ruleName, - Ruleset: ruleID, - Type: crushReplicatedType, - MinSize: ruleMinSizeDefault, - MaxSize: ruleMaxSizeDefault, - Steps: buildTwoStepCrushSteps(pool), - } -} - -func buildTwoStepCrushSteps(pool cephv1.PoolSpec) []stepSpec { - // Create CRUSH rule steps - steps := []stepSpec{} - - // Create the default step, which is essentially the entrypoint, the "root" of all requests - stepTakeDefault := &stepSpec{ - Operation: "take", - Item: -1, - ItemName: pool.CrushRoot, - } - steps = append(steps, *stepTakeDefault) - - // Steps two - stepTakeFailureDomain := &stepSpec{ - Operation: "chooseleaf_firstn", - Number: 0, - Type: pool.FailureDomain, - } - steps = append(steps, *stepTakeFailureDomain) - - // Step three - stepTakeSubFailureDomain := &stepSpec{ - Operation: "chooseleaf_firstn", - Number: pool.Replicated.ReplicasPerFailureDomain, - Type: pool.Replicated.SubFailureDomain, - } - steps = append(steps, *stepTakeSubFailureDomain) - steps = append(steps, *stepEmit) - - return steps -} - -func generateRuleID(rules []ruleSpec) int { - newRulesID := rules[len(rules)-1].ID + 1 - - for { - ruleIDExists := checkIfRuleIDExists(rules, newRulesID) - if !ruleIDExists { - break - } else { - newRulesID++ - } - } - - return newRulesID -} - -func checkIfRuleIDExists(rules []ruleSpec, ID int) bool { - for _, rule := range rules { - if rule.ID == ID { - return true - } - } - - return false -} diff --git 
a/pkg/daemon/ceph/client/crush_rule_test.go b/pkg/daemon/ceph/client/crush_rule_test.go deleted file mode 100644 index a3fbaa2ff..000000000 --- a/pkg/daemon/ceph/client/crush_rule_test.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "encoding/json" - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestBuildStretchClusterCrushRule(t *testing.T) { - var crushMap CrushMap - err := json.Unmarshal([]byte(testCrushMap), &crushMap) - assert.NoError(t, err) - - pool := &cephv1.PoolSpec{ - FailureDomain: "datacenter", - CrushRoot: cephv1.DefaultCRUSHRoot, - Replicated: cephv1.ReplicatedSpec{ - ReplicasPerFailureDomain: 2, - }, - } - - rule := buildTwoStepCrushRule(crushMap, "stretched", *pool) - assert.Equal(t, 2, rule.ID) -} - -func TestBuildCrushSteps(t *testing.T) { - pool := &cephv1.PoolSpec{ - FailureDomain: "datacenter", - CrushRoot: cephv1.DefaultCRUSHRoot, - Replicated: cephv1.ReplicatedSpec{ - ReplicasPerFailureDomain: 2, - }, - } - steps := buildTwoStepCrushSteps(*pool) - assert.Equal(t, 4, len(steps)) - assert.Equal(t, cephv1.DefaultCRUSHRoot, steps[0].ItemName) - assert.Equal(t, "datacenter", steps[1].Type) - assert.Equal(t, uint(2), steps[2].Number) -} - -func TestCompileCRUSHMap(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if command == "crushtool" && args[0] == "--compile" && args[1] == "/tmp/063990228" && args[2] == "--outfn" && args[3] == "/tmp/063990228.compiled" { - return "3", nil - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - - err := compileCRUSHMap(&clusterd.Context{Executor: executor}, "/tmp/063990228") - assert.Nil(t, err) -} - -func TestDecompileCRUSHMap(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if command == "crushtool" && args[0] == "--decompile" && args[1] == "/tmp/063990228" && args[2] == "--outfn" && args[3] == "/tmp/063990228.decompiled" { - return "3", nil - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - - err := decompileCRUSHMap(&clusterd.Context{Executor: executor}, "/tmp/063990228") - assert.Nil(t, err) -} - -func TestInjectCRUSHMapMap(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" && args[1] == "setcrushmap" && args[2] == "--in-file" && args[3] == "/tmp/063990228.compiled" { - return "3", nil - } - return "", errors.Errorf("unexpected 
ceph command '%v'", args) - } - - err := injectCRUSHMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "/tmp/063990228.compiled") - assert.Nil(t, err) -} - -func TestSetCRUSHMapMap(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" && args[1] == "crush" && args[2] == "set" && args[3] == "/tmp/063990228.compiled" { - return "3", nil - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - - err := setCRUSHMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "/tmp/063990228.compiled") - assert.Nil(t, err) -} - -func Test_generateRuleID(t *testing.T) { - tests := []struct { - name string - args []ruleSpec - want int - }{ - {"ordered rules", []ruleSpec{{ID: 1}, {ID: 2}, {ID: 3}}, 4}, - {"unordered rules", []ruleSpec{{ID: 1}, {ID: 3}, {ID: 2}, {ID: 5}}, 6}, - {"unordered rules", []ruleSpec{{ID: 1}, {ID: 3}, {ID: 2}}, 4}, - {"ordered rules", []ruleSpec{{ID: 1}, {ID: 3}}, 4}, - {"ordered rules", []ruleSpec{{ID: 1}}, 2}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := generateRuleID(tt.args); got != tt.want { - t.Errorf("generateRuleID() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/daemon/ceph/client/crush_test.go b/pkg/daemon/ceph/client/crush_test.go deleted file mode 100644 index db7d35824..000000000 --- a/pkg/daemon/ceph/client/crush_test.go +++ /dev/null @@ -1,337 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package client - -import ( - "fmt" - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -const testCrushMap = `{ - "devices": [ - { - "id": 0, - "name": "osd.0", - "class": "hdd" - } - ], - "types": [ - { - "type_id": 0, - "name": "osd" - }, - { - "type_id": 1, - "name": "host" - }, - { - "type_id": 2, - "name": "chassis" - }, - { - "type_id": 3, - "name": "rack" - }, - { - "type_id": 4, - "name": "row" - }, - { - "type_id": 5, - "name": "pdu" - }, - { - "type_id": 6, - "name": "pod" - }, - { - "type_id": 7, - "name": "room" - }, - { - "type_id": 8, - "name": "datacenter" - }, - { - "type_id": 9, - "name": "region" - }, - { - "type_id": 10, - "name": "root" - } - ], - "buckets": [ - { - "id": -1, - "name": "default", - "type_id": 10, - "type_name": "root", - "weight": 1028, - "alg": "straw", - "hash": "rjenkins1", - "items": [ - { - "id": -3, - "weight": 1028, - "pos": 0 - } - ] - }, - { - "id": -2, - "name": "default~hdd", - "type_id": 10, - "type_name": "root", - "weight": 1028, - "alg": "straw", - "hash": "rjenkins1", - "items": [ - { - "id": -4, - "weight": 1028, - "pos": 0 - } - ] - }, - { - "id": -3, - "name": "minikube", - "type_id": 1, - "type_name": "host", - "weight": 1028, - "alg": "straw", - "hash": "rjenkins1", - "items": [ - { - "id": 0, - "weight": 1028, - "pos": 0 - } - ] - }, - { - "id": -4, - "name": "minikube~hdd", - "type_id": 1, - "type_name": "host", - "weight": 1028, - "alg": "straw", - "hash": "rjenkins1", - "items": [ - { - "id": 0, - "weight": 1028, - "pos": 0 - } - ] - } - ], - "rules": [ - { - "rule_id": 0, - "rule_name": "replicated_ruleset", - "ruleset": 0, - "type": 1, - "min_size": 1, - "max_size": 10, - "steps": [ - { - "op": "take", - "item": -1, - "item_name": "default" - }, - { - "op": "chooseleaf_firstn", - "num": 0, - "type": "host" - }, - { - "op": "emit" - } - ] - }, - { - "rule_id": 1, - "rule_name": "hybrid_ruleset", - "ruleset": 1, - "type": 1, - "min_size": 1, - "max_size": 10, - "steps": [ - { - "op": "take", - "item": -2, - "item_name": "default~hdd" - }, - { - "op": "chooseleaf_firstn", - "num": 1, - "type": "host" - }, - { - "op": "emit" - }, - { - "op": "take", - "item": -2, - "item_name": "default~ssd" - }, - { - "op": "chooseleaf_firstn", - "num": 0, - "type": "host" - }, - { - "op": "emit" - } - ] - }, - { - "rule_id": 1, - "rule_name": "my-store.rgw.buckets.data", - "ruleset": 1, - "type": 3, - "min_size": 3, - "max_size": 3, - "steps": [ - { - "op": "set_chooseleaf_tries", - "num": 5 - }, - { - "op": "set_choose_tries", - "num": 100 - }, - { - "op": "take", - "item": -1, - "item_name": "default" - }, - { - "op": "chooseleaf_indep", - "num": 0, - "type": "host" - }, - { - "op": "emit" - } - ] - } - ], - "tunables": { - "choose_local_tries": 0, - "choose_local_fallback_tries": 0, - "choose_total_tries": 50, - "chooseleaf_descend_once": 1, - "chooseleaf_vary_r": 1, - "chooseleaf_stable": 0, - "straw_calc_version": 1, - "allowed_bucket_algs": 22, - "profile": "firefly", - "optimal_tunables": 0, - "legacy_tunables": 0, - "minimum_required_version": "firefly", - "require_feature_tunables": 1, - "require_feature_tunables2": 1, - "has_v2_rules": 1, - "require_feature_tunables3": 1, - "has_v3_rules": 0, - "has_v4_buckets": 0, - "require_feature_tunables5": 0, - "has_v5_rules": 0 - }, - "choose_args": {} -} -` - -func TestGetCrushMap(t *testing.T) { - executor := &exectest.MockExecutor{} - 
executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "crush" && args[2] == "dump" { - return testCrushMap, nil - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - crush, err := GetCrushMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) - - assert.Nil(t, err) - assert.Equal(t, 11, len(crush.Types)) - assert.Equal(t, 1, len(crush.Devices)) - assert.Equal(t, 4, len(crush.Buckets)) - assert.Equal(t, 3, len(crush.Rules)) -} - -func TestGetOSDOnHost(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "crush" && args[2] == "ls" { - return "[\"osd.2\",\"osd.0\",\"osd.1\"]", nil - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - - _, err := GetOSDOnHost(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "my-host") - assert.Nil(t, err) -} - -func TestCrushName(t *testing.T) { - // each is slightly different than the last - crushNames := []string{ - "www.zxyz.com", - "www.abcd.com", - "ip-10-0-132-84.us-east-2.compute.internal", - "ip-10-0-132-85.us-east-2.compute.internal", - "worker1", - "worker2", - "master1", - "master2", - "us-east-2b", - "us-east-2c", - "us-east-1", - "us-east-2", - "ip-10-0-175-140", - "ip-10-0-175-141", - } - - for i, crushName := range crushNames { - normalizedCrushName := NormalizeCrushName(crushName) - fmt.Printf("crushName: %s, normalizedCrushName: %s\n", crushName, normalizedCrushName) - assert.True(t, IsNormalizedCrushNameEqual(crushName, normalizedCrushName)) - assert.True(t, IsNormalizedCrushNameEqual(crushName, crushName)) - assert.True(t, IsNormalizedCrushNameEqual(normalizedCrushName, normalizedCrushName)) - if i > 0 { - // slightly different crush name - differentCrushName := crushNames[i-1] - differentNormalizedCrushName := NormalizeCrushName(differentCrushName) - assert.False(t, IsNormalizedCrushNameEqual(crushName, differentNormalizedCrushName)) - assert.False(t, IsNormalizedCrushNameEqual(crushName, differentCrushName)) - assert.False(t, IsNormalizedCrushNameEqual(normalizedCrushName, differentNormalizedCrushName)) - } - } -} - -func TestBuildCompiledDecompileCRUSHFileName(t *testing.T) { - assert.Equal(t, "/tmp/06399022.decompiled", buildDecompileCRUSHFileName("/tmp/06399022")) - assert.Equal(t, "/tmp/06399022.compiled", buildCompileCRUSHFileName("/tmp/06399022")) -} diff --git a/pkg/daemon/ceph/client/deviceclass.go b/pkg/daemon/ceph/client/deviceclass.go deleted file mode 100644 index fda23be07..000000000 --- a/pkg/daemon/ceph/client/deviceclass.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "encoding/json" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" -) - -// GetDeviceClasses gets the available device classes. -func GetDeviceClasses(context *clusterd.Context, clusterInfo *ClusterInfo) ([]string, error) { - args := []string{"osd", "crush", "class", "ls"} - cmd := NewCephCommand(context, clusterInfo, args) - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to get deviceclasses. %s", string(buf)) - } - - var deviceclass []string - if err := json.Unmarshal(buf, &deviceclass); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal osd crush class list response") - } - - return deviceclass, nil -} - -// GetDeviceClassOSDs gets the OSDs associated with a device class. -func GetDeviceClassOSDs(context *clusterd.Context, clusterInfo *ClusterInfo, deviceClass string) ([]int, error) { - args := []string{"osd", "crush", "class", "ls-osd", deviceClass} - cmd := NewCephCommand(context, clusterInfo, args) - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to get device class osd. %s", string(buf)) - } - - var osds []int - if err := json.Unmarshal(buf, &osds); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal device class osd response") - } - - return osds, nil -} diff --git a/pkg/daemon/ceph/client/deviceclass_test.go b/pkg/daemon/ceph/client/deviceclass_test.go deleted file mode 100644 index 5470801c9..000000000 --- a/pkg/daemon/ceph/client/deviceclass_test.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestGetDeviceClassOSDs(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "crush" && args[2] == "class" && args[3] == "ls-osd" && args[4] == "ssd" { - // Mock executor for `ceph osd crush class ls-osd ssd` - return "[0, 1, 2]", nil - } else if args[1] == "crush" && args[2] == "class" && args[3] == "ls-osd" && args[4] == "hdd" { - // Mock executor for `ceph osd crush class ls-osd hdd` - return "[]", nil - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - osds, err := GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "ssd") - assert.Nil(t, err) - assert.Equal(t, []int{0, 1, 2}, osds) - - osds, err = GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "hdd") - assert.Nil(t, err) - assert.Equal(t, []int{}, osds) -} diff --git a/pkg/daemon/ceph/client/erasure-code-profile.go b/pkg/daemon/ceph/client/erasure-code-profile.go deleted file mode 100644 index bb8c2fbad..000000000 --- a/pkg/daemon/ceph/client/erasure-code-profile.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package client - -import ( - "encoding/json" - "fmt" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" -) - -type CephErasureCodeProfile struct { - DataChunkCount uint `json:"k,string"` - CodingChunkCount uint `json:"m,string"` - Plugin string `json:"plugin"` - Technique string `json:"technique"` - FailureDomain string `json:"crush-failure-domain"` - CrushRoot string `json:"crush-root"` -} - -func ListErasureCodeProfiles(context *clusterd.Context, clusterInfo *ClusterInfo) ([]string, error) { - args := []string{"osd", "erasure-code-profile", "ls"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, errors.Wrap(err, "failed to list erasure-code-profiles") - } - - var ecProfiles []string - err = json.Unmarshal(buf, &ecProfiles) - if err != nil { - return nil, errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(buf)) - } - - return ecProfiles, nil -} - -func GetErasureCodeProfileDetails(context *clusterd.Context, clusterInfo *ClusterInfo, name string) (CephErasureCodeProfile, error) { - args := []string{"osd", "erasure-code-profile", "get", name} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return CephErasureCodeProfile{}, errors.Wrapf(err, "failed to get erasure-code-profile for %q", name) - } - - var ecProfileDetails CephErasureCodeProfile - err = json.Unmarshal(buf, &ecProfileDetails) - if err != nil { - return CephErasureCodeProfile{}, errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(buf)) - } - - return ecProfileDetails, nil -} - -func CreateErasureCodeProfile(context *clusterd.Context, clusterInfo *ClusterInfo, profileName string, pool cephv1.PoolSpec) error { - // look up the default profile so we can use the default plugin/technique - defaultProfile, err := GetErasureCodeProfileDetails(context, clusterInfo, "default") - if err != nil { - return errors.Wrap(err, "failed to look up default erasure code profile") - } - - // define the profile with a set of key/value pairs - profilePairs := []string{ - fmt.Sprintf("k=%d", pool.ErasureCoded.DataChunks), - fmt.Sprintf("m=%d", pool.ErasureCoded.CodingChunks), - fmt.Sprintf("plugin=%s", defaultProfile.Plugin), - fmt.Sprintf("technique=%s", defaultProfile.Technique), - } - if pool.FailureDomain != "" { - profilePairs = append(profilePairs, fmt.Sprintf("crush-failure-domain=%s", pool.FailureDomain)) - } - if pool.CrushRoot != "" { - profilePairs = append(profilePairs, fmt.Sprintf("crush-root=%s", pool.CrushRoot)) - } - if pool.DeviceClass != "" { - profilePairs = append(profilePairs, fmt.Sprintf("crush-device-class=%s", pool.DeviceClass)) - } - - args := []string{"osd", "erasure-code-profile", "set", profileName, "--force"} - args = append(args, profilePairs...) - _, err = NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrap(err, "failed to set ec-profile") - } - - return nil -} - -func DeleteErasureCodeProfile(context *clusterd.Context, clusterInfo *ClusterInfo, profileName string) error { - args := []string{"osd", "erasure-code-profile", "rm", profileName} - - cmd := NewCephCommand(context, clusterInfo, args) - cmd.JsonOutput = false - buf, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to delete erasure-code-profile %q. 
output: %q.", profileName, string(buf)) - } - - return nil -} diff --git a/pkg/daemon/ceph/client/erasure-code-profile_test.go b/pkg/daemon/ceph/client/erasure-code-profile_test.go deleted file mode 100644 index df42accb9..000000000 --- a/pkg/daemon/ceph/client/erasure-code-profile_test.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package client - -import ( - "fmt" - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - - "github.com/rook/rook/pkg/clusterd" -) - -func TestCreateProfile(t *testing.T) { - testCreateProfile(t, "", "myroot", "") -} - -func TestCreateProfileWithFailureDomain(t *testing.T) { - testCreateProfile(t, "osd", "", "") -} - -func TestCreateProfileWithDeviceClass(t *testing.T) { - testCreateProfile(t, "osd", "", "hdd") -} - -func testCreateProfile(t *testing.T, failureDomain, crushRoot, deviceClass string) { - spec := cephv1.PoolSpec{ - FailureDomain: failureDomain, - CrushRoot: crushRoot, - DeviceClass: deviceClass, - ErasureCoded: cephv1.ErasureCodedSpec{ - DataChunks: 2, - CodingChunks: 3, - }, - } - - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "erasure-code-profile" { - if args[2] == "get" { - assert.Equal(t, "default", args[3]) - return `{"plugin":"myplugin","technique":"t"}`, nil - } - if args[2] == "set" { - assert.Equal(t, "myapp", args[3]) - assert.Equal(t, "--force", args[4]) - assert.Equal(t, fmt.Sprintf("k=%d", spec.ErasureCoded.DataChunks), args[5]) - assert.Equal(t, fmt.Sprintf("m=%d", spec.ErasureCoded.CodingChunks), args[6]) - assert.Equal(t, "plugin=myplugin", args[7]) - assert.Equal(t, "technique=t", args[8]) - nextArg := 9 - if failureDomain != "" { - assert.Equal(t, fmt.Sprintf("crush-failure-domain=%s", failureDomain), args[nextArg]) - nextArg++ - } - if crushRoot != "" { - assert.Equal(t, fmt.Sprintf("crush-root=%s", crushRoot), args[nextArg]) - nextArg++ - } - if deviceClass != "" { - assert.Equal(t, fmt.Sprintf("crush-device-class=%s", deviceClass), args[nextArg]) - } - return "", nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err := CreateErasureCodeProfile(context, AdminClusterInfo("mycluster"), "myapp", spec) - assert.Nil(t, err) -} diff --git a/pkg/daemon/ceph/client/fake/osd.go b/pkg/daemon/ceph/client/fake/osd.go deleted file mode 100644 index 111d8df29..000000000 --- a/pkg/daemon/ceph/client/fake/osd.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "fmt" - "strconv" - "strings" -) - -// OsdLsOutput returns JSON output from 'ceph osd ls' that can be used for unit tests. It -// returns output for a Ceph cluster with the number of OSDs given as input starting with ID 0. -// example: numOSDs = 5 => return: "[0,1,2,3,4]" -func OsdLsOutput(numOSDs int) string { - stringIDs := make([]string, 0, numOSDs) - for id := 0; id < numOSDs; id++ { - stringIDs = append(stringIDs, strconv.Itoa(id)) - } - return fmt.Sprintf("[%s]", strings.Join(stringIDs, ",")) -} - -// OsdTreeOutput returns JSON output from 'ceph osd tree' that can be used for unit tests. -// It returns output for a Ceph cluster with the given number of nodes and the given number of OSDs -// per node with no complex configuration. This should work even for 0 nodes. -// example: OsdTreeOutput(3, 3) // returns JSON output for the Ceph cluster below -// node0: node1: node2: -// - osd0 - osd1 - osd2 -// - osd3 - osd4 - osd5 -// - osd6 - osd7 - osd8 -func OsdTreeOutput(numNodes, numOSDsPerNode int) string { - // JSON output taken from Ceph Pacific - rootFormat := ` { - "id": -1, - "name": "default", - "type": "root", - "type_id": 11, - "children": [%s] - }` // format: negative node IDs as comma-delimited string (e.g., "-3,-4,-5") - nodeFormat := ` { - "id": %d, - "name": "%s", - "type": "host", - "type_id": 1, - "pool_weights": {}, - "children": [%s] - }` // format: negative node ID, node name, OSD IDs as comma-delimited string (e.g., "0,3,6") - osdFormat := ` { - "id": %d, - "device_class": "hdd", - "name": "osd.%d", - "type": "osd", - "type_id": 0, - "crush_weight": 0.009796142578125, - "depth": 2, - "pool_weights": {}, - "exists": 1, - "status": "up", - "reweight": 1, - "primary_affinity": 1 - }` // format: OSD ID, OSD ID - wrapperFormat := `{ - "nodes": [ -%s - ], - "stray": [] -}` // format: - nodesJSON := []string{} - osdsJSON := []string{} - nodes := []string{} - for n := 0; n < numNodes; n++ { - osds := []string{} - nodeName := fmt.Sprintf("node%d", n) - nodeID := -3 - n - nodes = append(nodes, strconv.Itoa(nodeID)) - for i := 0; i < numOSDsPerNode; i++ { - osdID := n + 3*i - osds = append(osds, strconv.Itoa(osdID)) - osdsJSON = append(osdsJSON, fmt.Sprintf(osdFormat, osdID, osdID)) - } - nodesJSON = append(nodesJSON, fmt.Sprintf(nodeFormat, nodeID, nodeName, strings.Join(osds, ","))) - } - rootJSON := fmt.Sprintf(rootFormat, strings.Join(nodes, ",")) - fullJSON := append(append([]string{rootJSON}, nodesJSON...), osdsJSON...) - rendered := fmt.Sprintf(wrapperFormat, strings.Join(fullJSON, ",\n")) - return rendered -} - -// OsdOkToStopOutput returns JSON output from 'ceph osd ok-to-stop' that can be used for unit tests. -// queriedID should be given as the ID sent to the 'osd ok-to-stop [--max=N]' command. It will -// be returned with relevant NOT ok-to-stop results. -// If returnOsdIds is empty, this returns a NOT ok-to-stop result. Otherwise, it returns an -// ok-to-stop result. returnOsdIds should include queriedID if the result should be successful. 
-// usePacificPlusOutput instructs the function to render output for Ceph Pacific (v16) and above or -// to render output for Ceph Octopus (v15) and below. -func OsdOkToStopOutput(queriedID int, returnOsdIds []int, useCephPacificPlusOutput bool) string { - // For Pacific and up (Pacific+) - okTemplate := `{"ok_to_stop":true,"osds":[%s],"num_ok_pgs":132,"num_not_ok_pgs":0,"ok_become_degraded":["1.0","1.2","1.3"]}` - notOkTemplate := `{"ok_to_stop":false,"osds":[%d],"num_ok_pgs":161,"num_not_ok_pgs":50,"bad_become_inactive":["1.0","1.3","1.a"],"ok_become_degraded":["1.2","1.4","1.5"]}` - - // Ceph Octopus and below don't return anything on stdout, only success/failure via retcode - if !useCephPacificPlusOutput { - return "" - } - - // Pacific+, NOT ok-to-stop - if len(returnOsdIds) == 0 { - return fmt.Sprintf(notOkTemplate, queriedID) - } - - // Pacific+, ok-to-stop - osdIdsStr := make([]string, len(returnOsdIds)) - for i := 0; i < len(returnOsdIds); i++ { - osdIdsStr[i] = strconv.Itoa(returnOsdIds[i]) - } - return fmt.Sprintf(okTemplate, strings.Join(osdIdsStr, ",")) -} - -// OSDDeviceClassOutput returns JSON output from 'ceph osd crush get-device-class' that can be used for unit tests. -// osdId is a osd ID to get from crush map. If ID is empty raise a fake error. -func OSDDeviceClassOutput(osdId string) string { - if osdId == "" { - return "ERR: fake error from ceph cli" - } - okTemplate := `[{"osd":%s,"device_class":"hdd"}]` - return fmt.Sprintf(okTemplate, osdId) -} diff --git a/pkg/daemon/ceph/client/filesystem.go b/pkg/daemon/ceph/client/filesystem.go deleted file mode 100644 index 08b3e707c..000000000 --- a/pkg/daemon/ceph/client/filesystem.go +++ /dev/null @@ -1,383 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
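[Editor's illustration, not part of the diff] The removed fake/osd.go helpers above only render canned `ceph osd ...` JSON for unit tests, so their behavior can be asserted on directly. A minimal hypothetical sketch, assuming the pre-removal package path github.com/rook/rook/pkg/daemon/ceph/client/fake:

package fake_test

import (
	"testing"

	"github.com/rook/rook/pkg/daemon/ceph/client/fake"
	"github.com/stretchr/testify/assert"
)

// TestFakeOsdHelpers is a hypothetical sketch, not part of the original change.
func TestFakeOsdHelpers(t *testing.T) {
	// 'ceph osd ls' style output for five OSDs starting at ID 0.
	assert.Equal(t, "[0,1,2,3,4]", fake.OsdLsOutput(5))

	// Pacific+ NOT ok-to-stop result for OSD 0 (no OSD IDs returned).
	assert.Contains(t, fake.OsdOkToStopOutput(0, []int{}, true), `"ok_to_stop":false`)

	// Pacific+ ok-to-stop result covering OSDs 0 and 3.
	assert.Contains(t, fake.OsdOkToStopOutput(0, []int{0, 3}, true), `"ok_to_stop":true`)

	// Octopus and below return nothing on stdout; only the return code matters.
	assert.Equal(t, "", fake.OsdOkToStopOutput(0, []int{0}, false))

	// Device class lookup for OSD 0.
	assert.Equal(t, `[{"osd":0,"device_class":"hdd"}]`, fake.OSDDeviceClassOutput("0"))
}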
-*/ - -package client - -import ( - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "k8s.io/apimachinery/pkg/util/wait" -) - -type MDSDump struct { - Standbys []MDSStandBy `json:"standbys"` - FileSystems []MDSMap `json:"filesystems"` -} - -type MDSStandBy struct { - Name string `json:"name"` - Rank int `json:"rank"` -} - -// CephFilesystem is a representation of the json structure returned by 'ceph fs ls' -type CephFilesystem struct { - Name string `json:"name"` - MetadataPool string `json:"metadata_pool"` - MetadataPoolID int `json:"metadata_pool_id"` - DataPools []string `json:"data_pools"` - DataPoolIDs []int `json:"data_pool_ids"` -} - -// CephFilesystemDetails is a representation of the main json structure returned by 'ceph fs get' -type CephFilesystemDetails struct { - ID int `json:"id"` - MDSMap MDSMap `json:"mdsmap"` -} - -// MDSMap is a representation of the mds map sub-structure returned by 'ceph fs get' -type MDSMap struct { - FilesystemName string `json:"fs_name"` - Enabled bool `json:"enabled"` - Root int `json:"root"` - TableServer int `json:"tableserver"` - MaxMDS int `json:"max_mds"` - In []int `json:"in"` - Up map[string]int `json:"up"` - MetadataPool int `json:"metadata_pool"` - DataPools []int `json:"data_pools"` - Failed []int `json:"failed"` - Damaged []int `json:"damaged"` - Stopped []int `json:"stopped"` - Info map[string]MDSInfo `json:"info"` -} - -// MDSInfo is a representation of the individual mds daemon sub-sub-structure returned by 'ceph fs get' -type MDSInfo struct { - GID int `json:"gid"` - Name string `json:"name"` - Rank int `json:"rank"` - State string `json:"state"` - Address string `json:"addr"` -} - -// ListFilesystems lists all filesystems provided by the Ceph cluster. -func ListFilesystems(context *clusterd.Context, clusterInfo *ClusterInfo) ([]CephFilesystem, error) { - args := []string{"fs", "ls"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, errors.Wrap(err, "failed to list filesystems") - } - - var filesystems []CephFilesystem - err = json.Unmarshal(buf, &filesystems) - if err != nil { - return nil, errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(buf)) - } - - return filesystems, nil -} - -// GetFilesystem gets detailed status information about a Ceph filesystem. -func GetFilesystem(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string) (*CephFilesystemDetails, error) { - args := []string{"fs", "get", fsName} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, err - } - - var fs CephFilesystemDetails - err = json.Unmarshal(buf, &fs) - if err != nil { - return nil, errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(buf)) - } - - return &fs, nil -} - -// AllowStandbyReplay gets detailed status information about a Ceph filesystem. -func AllowStandbyReplay(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string, allowStandbyReplay bool) error { - logger.Infof("setting allow_standby_replay for filesystem %q", fsName) - args := []string{"fs", "set", fsName, "allow_standby_replay", strconv.FormatBool(allowStandbyReplay)} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to set allow_standby_replay to filesystem %s", fsName) - } - - return nil -} - -// CreateFilesystem performs software configuration steps for Ceph to provide a new filesystem. 
-func CreateFilesystem(context *clusterd.Context, clusterInfo *ClusterInfo, name, metadataPool string, dataPools []string) error { - if len(dataPools) == 0 { - return errors.New("at least one data pool is required") - } - - logger.Infof("creating filesystem %q with metadata pool %q and data pools %v", name, metadataPool, dataPools) - var err error - - // Always enable multiple fs when running on Pacific - if clusterInfo.CephVersion.IsAtLeastPacific() { - // enable multiple file systems in case this is not the first - args := []string{"fs", "flag", "set", "enable_multiple", "true", confirmFlag} - _, err = NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrap(err, "failed to enable multiple file systems") - } - } - - // create the filesystem - args := []string{"fs", "new", name, metadataPool, dataPools[0]} - - _, err = NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed enabling ceph fs %q", name) - } - - // add each additional pool - for i := 1; i < len(dataPools); i++ { - err = AddDataPoolToFilesystem(context, clusterInfo, name, dataPools[i]) - if err != nil { - logger.Errorf("%v", err) - } - } - - return nil -} - -// AddDataPoolToFilesystem associates the provided data pool with the filesystem. -func AddDataPoolToFilesystem(context *clusterd.Context, clusterInfo *ClusterInfo, name, poolName string) error { - args := []string{"fs", "add_data_pool", name, poolName} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to add pool %q to file system %q. (%v)", poolName, name, err) - } - return nil -} - -// SetNumMDSRanks sets the number of mds ranks (max_mds) for a Ceph filesystem. -func SetNumMDSRanks(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string, activeMDSCount int32) error { - - // Always tell Ceph to set the new max_mds value - args := []string{"fs", "set", fsName, "max_mds", strconv.Itoa(int(activeMDSCount))} - if _, err := NewCephCommand(context, clusterInfo, args).Run(); err != nil { - return errors.Wrapf(err, "failed to set filesystem %s num mds ranks (max_mds) to %d", fsName, activeMDSCount) - } - return nil -} - -// FailAllStandbyReplayMDS: fail all mds in up:standby-replay state -func FailAllStandbyReplayMDS(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string) error { - fs, err := GetFilesystem(context, clusterInfo, fsName) - if err != nil { - return errors.Wrapf(err, "failed to fail standby-replay MDSes for fs %q", fsName) - } - for _, info := range fs.MDSMap.Info { - if info.State == "up:standby-replay" { - if err := FailMDS(context, clusterInfo, info.GID); err != nil { - return errors.Wrapf(err, "failed to fail MDS %q for filesystem %q in up:standby-replay state", info.Name, fsName) - } - } - } - return nil -} - -// GetMdsIdByRank get mds ID from the given rank -func GetMdsIdByRank(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string, rank int32) (string, error) { - fs, err := GetFilesystem(context, clusterInfo, fsName) - if err != nil { - return "", errors.Wrap(err, "failed to get ceph fs dump") - } - gid, ok := fs.MDSMap.Up[fmt.Sprintf("mds_%d", rank)] - if !ok { - return "", errors.Errorf("failed to get mds gid from rank %d", rank) - } - info, ok := fs.MDSMap.Info[fmt.Sprintf("gid_%d", gid)] - if !ok { - return "", errors.Errorf("failed to get mds info for rank %d", rank) - } - return info.Name, nil -} - -// WaitForActiveRanks waits for the filesystem's number of active ranks to equal 
the desired count. -// It times out with an error if the number of active ranks does not become desired in time. -// Param 'moreIsOkay' will allow success condition if num of ranks is more than active count given. -func WaitForActiveRanks( - context *clusterd.Context, - clusterInfo *ClusterInfo, fsName string, - desiredActiveRanks int32, moreIsOkay bool, timeout time.Duration, -) error { - countText := fmt.Sprintf("%d", desiredActiveRanks) - if moreIsOkay { - // If it's okay to have more active ranks than desired, indicate so in log messages - countText = fmt.Sprintf("%d or more", desiredActiveRanks) - } - logger.Infof("waiting %.2f second(s) for number of active mds daemons for fs %s to become %s", - float64(timeout/time.Second), fsName, countText) - err := wait.Poll(3*time.Second, timeout, func() (bool, error) { - fs, err := GetFilesystem(context, clusterInfo, fsName) - if err != nil { - logger.Errorf( - "Error getting filesystem %q details while waiting for num mds ranks to become %d. %v", - fsName, desiredActiveRanks, err) - } else if fs.MDSMap.MaxMDS == int(desiredActiveRanks) && - activeRanksSuccess(len(fs.MDSMap.Up), int(desiredActiveRanks), moreIsOkay) { - // Both max_mds and number of up MDS daemons must equal desired number of ranks to - // prevent a false positive when Ceph has got the correct number of mdses up but is - // trying to change the number of mdses up to an undesired number. - logger.Debugf("mds ranks for filesystem %q successfully became %d", fsName, desiredActiveRanks) - return true, nil - // continue to inf loop after send ready; only return when get quit signal to - // prevent deadlock - } - return false, nil - }) - if err != nil { - return errors.Errorf("timeout waiting for number active mds daemons for filesystem %q to become %q", - fsName, countText) - } - return nil -} - -func activeRanksSuccess(upCount, desiredRanks int, moreIsOkay bool) bool { - if moreIsOkay { - return upCount >= desiredRanks - } - return upCount == desiredRanks -} - -// MarkFilesystemAsDown marks a Ceph filesystem as down. -func MarkFilesystemAsDown(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string) error { - args := []string{"fs", "set", fsName, "cluster_down", "true"} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to set file system %s to cluster_down", fsName) - } - return nil -} - -// FailMDS instructs Ceph to fail an mds daemon. -func FailMDS(context *clusterd.Context, clusterInfo *ClusterInfo, gid int) error { - args := []string{"mds", "fail", strconv.Itoa(gid)} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to fail mds %d", gid) - } - return nil -} - -// FailFilesystem efficiently brings down the filesystem by marking the filesystem as down -// and failing the MDSes using a single Ceph command. -func FailFilesystem(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string) error { - args := []string{"fs", "fail", fsName} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to fail filesystem %s", fsName) - } - return nil -} - -// RemoveFilesystem performs software configuration steps to remove a Ceph filesystem and its -// backing pools. 
-func RemoveFilesystem(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string, preservePoolsOnDelete bool) error { - fs, err := GetFilesystem(context, clusterInfo, fsName) - if err != nil { - return errors.Wrapf(err, "filesystem %s not found", fsName) - } - - args := []string{"fs", "rm", fsName, confirmFlag} - _, err = NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "Failed to delete ceph fs %s", fsName) - } - - if !preservePoolsOnDelete { - err = deleteFSPools(context, clusterInfo, fs) - if err != nil { - return errors.Wrapf(err, "failed to delete fs %s pools", fsName) - } - } else { - logger.Infof("PreservePoolsOnDelete is set in filesystem %s. Pools not deleted", fsName) - } - - return nil -} - -func deleteFSPools(context *clusterd.Context, clusterInfo *ClusterInfo, fs *CephFilesystemDetails) error { - poolNames, err := GetPoolNamesByID(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get pool names") - } - - var lastErr error = nil - - // delete the metadata pool - if err := deleteFSPool(context, clusterInfo, poolNames, fs.MDSMap.MetadataPool); err != nil { - lastErr = err - } - - // delete the data pools - for _, poolID := range fs.MDSMap.DataPools { - if err := deleteFSPool(context, clusterInfo, poolNames, poolID); err != nil { - lastErr = err - } - } - - return lastErr -} - -func deleteFSPool(context *clusterd.Context, clusterInfo *ClusterInfo, poolNames map[int]string, id int) error { - name, ok := poolNames[id] - if !ok { - return errors.Errorf("pool %d not found", id) - } - return DeletePool(context, clusterInfo, name) -} - -// WaitForNoStandbys waits for all standbys go away -func WaitForNoStandbys(context *clusterd.Context, clusterInfo *ClusterInfo, timeout time.Duration) error { - err := wait.Poll(3*time.Second, timeout, func() (bool, error) { - mdsDump, err := GetMDSDump(context, clusterInfo) - if err != nil { - logger.Errorf("failed to get fs dump. %v", err) - return false, nil - } - return len(mdsDump.Standbys) == 0, nil - }) - - if err != nil { - return errors.Wrap(err, "timeout waiting for no standbys") - } - return nil -} - -func GetMDSDump(context *clusterd.Context, clusterInfo *ClusterInfo) (*MDSDump, error) { - args := []string{"fs", "dump"} - cmd := NewCephCommand(context, clusterInfo, args) - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to dump fs info") - } - var dump MDSDump - if err := json.Unmarshal(buf, &dump); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal fs dump. %s", buf) - } - return &dump, nil -} diff --git a/pkg/daemon/ceph/client/filesystem_mirror.go b/pkg/daemon/ceph/client/filesystem_mirror.go deleted file mode 100644 index dfe6151cc..000000000 --- a/pkg/daemon/ceph/client/filesystem_mirror.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
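[Editor's illustration, not part of the diff] The removed CreateFilesystem above enables multiple filesystems on Pacific, runs `ceph fs new`, and then attaches any extra data pools via AddDataPoolToFilesystem. This change contains no unit test for it; a hypothetical sketch of one, reusing the MockExecutor pattern from the deleted tests (filesystem and pool names invented, pre-removal rook packages assumed):

package client

import (
	"testing"

	"github.com/pkg/errors"
	"github.com/rook/rook/pkg/clusterd"
	exectest "github.com/rook/rook/pkg/util/exec/test"
	"github.com/stretchr/testify/assert"
)

// TestCreateFilesystem is a hypothetical sketch, not part of the original change.
func TestCreateFilesystem(t *testing.T) {
	dataPoolsUsed := []string{}
	executor := &exectest.MockExecutor{}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "flag" {
				// `ceph fs flag set enable_multiple true` (only issued on Pacific and newer)
				return "", nil
			}
			if args[1] == "new" {
				// `ceph fs new myfs myfs-metadata myfs-data0`
				dataPoolsUsed = append(dataPoolsUsed, args[4])
				return "", nil
			}
			if args[1] == "add_data_pool" {
				// `ceph fs add_data_pool myfs myfs-data1`
				dataPoolsUsed = append(dataPoolsUsed, args[3])
				return "", nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	context := &clusterd.Context{Executor: executor}

	err := CreateFilesystem(context, AdminClusterInfo("mycluster"), "myfs", "myfs-metadata", []string{"myfs-data0", "myfs-data1"})
	assert.NoError(t, err)
	assert.Equal(t, []string{"myfs-data0", "myfs-data1"}, dataPoolsUsed)
}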
-*/ - -package client - -import ( - "encoding/json" - "fmt" - "strings" - "syscall" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/util/exec" -) - -type BootstrapPeerToken struct { - Token string `json:"token"` -} - -// RemoveFilesystemMirrorPeer add a mirror peer in the cephfs-mirror configuration -func RemoveFilesystemMirrorPeer(context *clusterd.Context, clusterInfo *ClusterInfo, peerUUID string) error { - logger.Infof("removing cephfs-mirror peer %q", peerUUID) - - // Build command - args := []string{"fs", "snapshot", "mirror", "peer_remove", peerUUID} - cmd := NewCephCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to remove cephfs-mirror peer for filesystem %q. %s", peerUUID, output) - } - - logger.Infof("successfully removed cephfs-mirror peer %q", peerUUID) - return nil -} - -// EnableFilesystemSnapshotMirror enables filesystem snapshot mirroring -func EnableFilesystemSnapshotMirror(context *clusterd.Context, clusterInfo *ClusterInfo, filesystem string) error { - logger.Infof("enabling ceph filesystem snapshot mirror for filesystem %q", filesystem) - - // Build command - args := []string{"fs", "snapshot", "mirror", "enable", filesystem} - cmd := NewCephCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to enable ceph filesystem snapshot mirror for filesystem %q. %s", filesystem, output) - } - - logger.Infof("successfully enabled ceph filesystem snapshot mirror for filesystem %q", filesystem) - return nil -} - -// DisableFilesystemSnapshotMirror enables filesystem snapshot mirroring -func DisableFilesystemSnapshotMirror(context *clusterd.Context, clusterInfo *ClusterInfo, filesystem string) error { - logger.Infof("disabling ceph filesystem snapshot mirror for filesystem %q", filesystem) - - // Build command - args := []string{"fs", "snapshot", "mirror", "disable", filesystem} - cmd := NewCephCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOTSUP) { - logger.Debug("filesystem mirroring is not enabled, nothing to disable") - return nil - } - return errors.Wrapf(err, "failed to disable ceph filesystem snapshot mirror for filesystem %q. %s", filesystem, output) - } - - logger.Infof("successfully disabled ceph filesystem snapshot mirror for filesystem %q", filesystem) - return nil -} - -func AddSnapshotSchedule(context *clusterd.Context, clusterInfo *ClusterInfo, path, interval, startTime, filesystem string) error { - logger.Infof("adding snapshot schedule every %q to ceph filesystem %q on path %q", interval, filesystem, path) - - args := []string{"fs", "snap-schedule", "add", path, interval} - if startTime != "" { - args = append(args, startTime) - } - args = append(args, fmt.Sprintf("fs=%s", filesystem)) - cmd := NewCephCommand(context, clusterInfo, args) - cmd.JsonOutput = false - // Example command: "ceph fs snap-schedule add / 4d fs=myfs2" - - // CHANGE time for "2014-01-09T21:48:00" IF interval - // Run command - output, err := cmd.Run() - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code != int(syscall.EEXIST) { - return errors.Wrapf(err, "failed to add snapshot schedule every %q to ceph filesystem %q on path %q. 
%s", interval, filesystem, path, output) - } - } - - logger.Infof("successfully added snapshot schedule every %q to ceph filesystem %q on path %q", interval, filesystem, path) - return nil -} - -func AddSnapshotScheduleRetention(context *clusterd.Context, clusterInfo *ClusterInfo, path, duration, filesystem string) error { - logger.Infof("adding snapshot schedule retention %s to ceph filesystem %q on path %q", duration, filesystem, path) - - // Example command: "ceph fs snap-schedule retention add / d 1 fs=myfs2" - args := []string{"fs", "snap-schedule", "retention", "add", path, duration, fmt.Sprintf("fs=%s", filesystem)} - cmd := NewCephCommand(context, clusterInfo, args) - cmd.JsonOutput = false - - // Run command - output, err := cmd.Run() - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - logger.Warningf("snapshot schedule retention %s already exists for filesystem %q on path %q. %s", duration, filesystem, path, output) - } else { - return errors.Wrapf(err, "failed to add snapshot schedule retention %s to ceph filesystem %q on path %q. %s", duration, filesystem, path, output) - } - } - - logger.Infof("successfully added snapshot schedule retention %s to ceph filesystem %q on path %q", duration, filesystem, path) - return nil -} - -func GetSnapshotScheduleStatus(context *clusterd.Context, clusterInfo *ClusterInfo, filesystem string) ([]cephv1.FilesystemSnapshotSchedulesSpec, error) { - logger.Infof("retrieving snapshot schedule status for ceph filesystem %q", filesystem) - - args := []string{"fs", "snap-schedule", "status", "/", "recursive=true", fmt.Sprintf("--fs=%s", filesystem)} - cmd := NewCephCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve snapshot schedule status for ceph filesystem %q. %s", filesystem, output) - } - - // Unmarshal JSON into Go struct - var filesystemSnapshotSchedulesStatusSpec []cephv1.FilesystemSnapshotSchedulesSpec - - /* Replace new line since the command outputs a new line first and breaks the json parsing... 
- [root@rook-ceph-operator-75c6d6bbfc-wqlnc /]# ceph --connect-timeout=15 --cluster=rook-ceph --conf=/var/lib/rook/rook-ceph/rook-ceph.config --name=client.admin --keyring=/var/lib/rook/rook-ceph/client.admin.keyring --format json fs snap-schedule status / - - [{"fs": "myfs", "subvol": null, "path": "/", "rel_path": "/", "schedule": "24h", "retention": {"h": 24}, "start": "2021-07-01T00:00:00", "created": "2021-07-01T12:19:12", "first": null, "last": null, "last_pruned": null, "created_count": 0, "pruned_count": 0, "active": true},{"fs": "myfs", "subvol": null, "path": "/", "rel_path": "/", "schedule": "25h", "retention": {"h": 24}, "start": "2021-07-01T00:00:00", "created": "2021-07-01T12:31:25", "first": null, "last": null, "last_pruned": null, "created_count": 0, "pruned_count": 0, "active": true}] - */ - if err := json.Unmarshal([]byte(strings.ReplaceAll(string(output), "\n", "")), &filesystemSnapshotSchedulesStatusSpec); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal filesystem mirror snapshot schedule status response") - } - - logger.Infof("successfully retrieved snapshot schedule status for ceph filesystem %q", filesystem) - return filesystemSnapshotSchedulesStatusSpec, nil -} - -// ImportFSMirrorBootstrapPeer add a mirror peer in the cephfs-mirror configuration -func ImportFSMirrorBootstrapPeer(context *clusterd.Context, clusterInfo *ClusterInfo, fsName, token string) error { - logger.Infof("importing cephfs bootstrap peer token for filesystem %q", fsName) - - // Build command - args := []string{"fs", "snapshot", "mirror", "peer_bootstrap", "import", fsName, token} - cmd := NewCephCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to import cephfs-mirror peer token for filesystem %q. %s", fsName, output) - } - - logger.Infof("successfully imported cephfs-mirror peer for filesystem %q", fsName) - return nil -} - -// CreateFSMirrorBootstrapPeer add a mirror peer in the cephfs-mirror configuration -func CreateFSMirrorBootstrapPeer(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string) ([]byte, error) { - logger.Infof("create cephfs-mirror bootstrap peer token for filesystem %q", fsName) - - // Build command - args := []string{"fs", "snapshot", "mirror", "peer_bootstrap", "create", fsName, "client.mirror", clusterInfo.FSID} - cmd := NewCephCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to create cephfs-mirror peer token for filesystem %q. %s", fsName, output) - } - - // Unmarshal JSON into Go struct - var bootstrapPeerToken BootstrapPeerToken - if err := json.Unmarshal(output, &bootstrapPeerToken); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal cephfs-mirror peer token create response. 
%s", output) - } - - logger.Infof("successfully created cephfs-mirror bootstrap peer token for filesystem %q", fsName) - return []byte(bootstrapPeerToken.Token), nil -} - -// GetFSMirrorDaemonStatus returns the mirroring status of a given filesystem -func GetFSMirrorDaemonStatus(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string) ([]cephv1.FilesystemMirroringInfo, error) { - // Using Debug level since this is called in a recurrent go routine - logger.Debugf("retrieving filesystem mirror status for filesystem %q", fsName) - - // Build command - args := []string{"fs", "snapshot", "mirror", "daemon", "status", fsName} - cmd := NewCephCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve filesystem mirror status for filesystem %q. %s", fsName, output) - } - - // Unmarshal JSON into Go struct - var filesystemMirroringInfo []cephv1.FilesystemMirroringInfo - if err := json.Unmarshal([]byte(output), &filesystemMirroringInfo); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal filesystem mirror status response. %q.", string(output)) - } - - logger.Debugf("successfully retrieved filesystem mirror status for filesystem %q", fsName) - return filesystemMirroringInfo, nil -} diff --git a/pkg/daemon/ceph/client/filesystem_mirror_test.go b/pkg/daemon/ceph/client/filesystem_mirror_test.go deleted file mode 100644 index 30d83924f..000000000 --- a/pkg/daemon/ceph/client/filesystem_mirror_test.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "encoding/base64" - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -var ( - // response of "ceph fs snapshot mirror peer_bootstrap create myfs2 client.mirror test" - // #nosec G101 since this is not leaking any credentials - fsMirrorToken = `{"token": "eyJmc2lkIjogIjgyYjdlZDkyLTczYjAtNGIyMi1hOGI3LWVkOTQ4M2UyODc1NiIsICJmaWxlc3lzdGVtIjogIm15ZnMyIiwgInVzZXIiOiAiY2xpZW50Lm1pcnJvciIsICJzaXRlX25hbWUiOiAidGVzdCIsICJrZXkiOiAiQVFEVVAxSmdqM3RYQVJBQWs1cEU4cDI1ZUhld2lQK0ZXRm9uOVE9PSIsICJtb25faG9zdCI6ICJbdjI6MTAuOTYuMTQyLjIxMzozMzAwLHYxOjEwLjk2LjE0Mi4yMTM6Njc4OV0sW3YyOjEwLjk2LjIxNy4yMDc6MzMwMCx2MToxMC45Ni4yMTcuMjA3OjY3ODldLFt2MjoxMC45OS4xMC4xNTc6MzMwMCx2MToxMC45OS4xMC4xNTc6Njc4OV0ifQ=="}` - - // response of "ceph fs snapshot mirror daemon status myfs" - // fsMirrorDaemonStatus = `{ "daemon_id": "444607", "filesystems": [ { "filesystem_id": "1", "name": "myfs", "directory_count": 0, "peers": [ { "uuid": "4a6983c0-3c9d-40f5-b2a9-2334a4659827", "remote": { "client_name": "client.mirror_remote", "cluster_name": "site-remote", "fs_name": "backup_fs" }, "stats": { "failure_count": 0, "recovery_count": 0 } } ] } ] }` - fsMirrorDaemonStatusNew = `[{"daemon_id":25103, "filesystems": [{"filesystem_id": 1, "name": "myfs", "directory_count": 0, "peers": []}]}]` -) - -func TestEnableFilesystemSnapshotMirror(t *testing.T) { - fs := "myfs" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "fs" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "mirror", args[2]) - assert.Equal(t, "enable", args[3]) - assert.Equal(t, fs, args[4]) - return "", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - err := EnableFilesystemSnapshotMirror(context, AdminClusterInfo("mycluster"), fs) - assert.NoError(t, err) -} - -func TestDisableFilesystemSnapshotMirror(t *testing.T) { - fs := "myfs" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "fs" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "mirror", args[2]) - assert.Equal(t, "disable", args[3]) - assert.Equal(t, fs, args[4]) - return "", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - err := DisableFilesystemSnapshotMirror(context, AdminClusterInfo("mycluster"), fs) - assert.NoError(t, err) -} - -func TestImportFilesystemMirrorPeer(t *testing.T) { - fs := "myfs" - token := "my-token" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "fs" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "mirror", args[2]) - assert.Equal(t, "peer_bootstrap", args[3]) - assert.Equal(t, "import", args[4]) - assert.Equal(t, fs, args[5]) - assert.Equal(t, token, args[6]) - return "", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - err := ImportFSMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), fs, token) - assert.NoError(t, err) -} - -func TestCreateFSMirrorBootstrapPeer(t *testing.T) { - fs := "myfs" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, 
error) { - if args[0] == "fs" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "mirror", args[2]) - assert.Equal(t, "peer_bootstrap", args[3]) - assert.Equal(t, "create", args[4]) - assert.Equal(t, fs, args[5]) - return fsMirrorToken, nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - token, err := CreateFSMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), fs) - assert.NoError(t, err) - _, err = base64.StdEncoding.DecodeString(string(token)) - assert.NoError(t, err) - -} - -func TestRemoveFilesystemMirrorPeer(t *testing.T) { - peerUUID := "peer-uuid" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "mirror", args[2]) - assert.Equal(t, "peer_remove", args[3]) - assert.Equal(t, peerUUID, args[4]) - return "", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - err := RemoveFilesystemMirrorPeer(context, AdminClusterInfo("mycluster"), peerUUID) - assert.NoError(t, err) -} - -func TestFSMirrorDaemonStatus(t *testing.T) { - fs := "myfs" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "fs" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "mirror", args[2]) - assert.Equal(t, "daemon", args[3]) - assert.Equal(t, "status", args[4]) - assert.Equal(t, fs, args[5]) - return fsMirrorDaemonStatusNew, nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - s, err := GetFSMirrorDaemonStatus(context, AdminClusterInfo("mycluster"), fs) - assert.NoError(t, err) - assert.Equal(t, "myfs", s[0].Filesystems[0].Name) -} diff --git a/pkg/daemon/ceph/client/filesystem_test.go b/pkg/daemon/ceph/client/filesystem_test.go deleted file mode 100644 index 59df88c44..000000000 --- a/pkg/daemon/ceph/client/filesystem_test.go +++ /dev/null @@ -1,589 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
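[Editor's illustration, not part of the diff] The deleted filesystem tests that follow do not cover WaitForActiveRanks. A hypothetical sketch of its happy path, where max_mds and the number of up MDS daemons already match the desired rank count so the poll succeeds on its first check (pre-removal rook packages assumed):

package client

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/rook/rook/pkg/clusterd"
	exectest "github.com/rook/rook/pkg/util/exec/test"
	"github.com/stretchr/testify/assert"
)

// TestWaitForActiveRanks is a hypothetical sketch, not part of the original change.
func TestWaitForActiveRanks(t *testing.T) {
	fs := CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			MaxMDS:         1,
			Up:             map[string]int{"mds_0": 4107},
		},
	}
	executor := &exectest.MockExecutor{}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		if args[0] == "fs" && args[1] == "get" {
			// Return the marshalled filesystem details for `ceph fs get myfs1`
			output, err := json.Marshal(fs)
			assert.Nil(t, err)
			return string(output), nil
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	context := &clusterd.Context{Executor: executor}

	// One rank is desired, max_mds is 1, and one MDS is up, so this returns well before the timeout.
	err := WaitForActiveRanks(context, AdminClusterInfo("mycluster"), "myfs1", 1, false, 15*time.Second)
	assert.NoError(t, err)
}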
-*/ - -package client - -import ( - "encoding/json" - "fmt" - "testing" - "time" - - "github.com/pkg/errors" - exectest "github.com/rook/rook/pkg/util/exec/test" - - "github.com/rook/rook/pkg/clusterd" - "github.com/stretchr/testify/assert" -) - -const ( - // this JSON was generated from the mon_command "fs ls", ExecuteMonCommand(conn, map[string]interface{}{"prefix": "fs ls"}) - cephFilesystemListResponseRaw = `[{"name":"myfs1","metadata_pool":"myfs1-metadata","metadata_pool_id":2,"data_pool_ids":[1],"data_pools":["myfs1-data"]}]` - - // this JSON was generated from the mon_command "fs get", ExecuteMonCommand(conn, map[string]interface{}{"prefix": "fs get","fs_name": fsName,}) - cephFilesystemGetResponseRaw = `{"mdsmap":{"epoch":6,"flags":1,"ever_allowed_features":0,"explicitly_allowed_features":0,"created":"2016-11-30 08:35:06.416438","modified":"2016-11-30 08:35:06.416438","tableserver":0,"root":0,"session_timeout":60,"session_autoclose":300,"max_file_size":1099511627776,"last_failure":0,"last_failure_osd_epoch":0,"compat":{"compat":{},"ro_compat":{},"incompat":{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs","feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap","feature_8":"file layout v2"}},"max_mds":1,"in":[0],"up":{"mds_0":4107},"failed":[],"damaged":[],"stopped":[],"info":{"gid_4107":{"gid":4107,"name":"1","rank":0,"incarnation":4,"state":"up:active","state_seq":3,"addr":"127.0.0.1:6804\/2981621686","standby_for_rank":-1,"standby_for_fscid":-1,"standby_for_name":"","standby_replay":false,"export_targets":[],"features":1152921504336314367}},"data_pools":[1],"metadata_pool":2,"enabled":true,"fs_name":"myfs1","balancer":""},"id":1}` -) - -func TestFilesystemListMarshal(t *testing.T) { - var filesystems []CephFilesystem - err := json.Unmarshal([]byte(cephFilesystemListResponseRaw), &filesystems) - assert.Nil(t, err) - - // create the expected file systems listing object - expectedFilesystems := []CephFilesystem{ - { - Name: "myfs1", - MetadataPool: "myfs1-metadata", - MetadataPoolID: 2, - DataPools: []string{"myfs1-data"}, - DataPoolIDs: []int{1}}, - } - - assert.Equal(t, expectedFilesystems, filesystems) -} - -func TestFilesystemGetMarshal(t *testing.T) { - var fs CephFilesystemDetails - err := json.Unmarshal([]byte(cephFilesystemGetResponseRaw), &fs) - assert.Nil(t, err) - - // create the expected file system details object - expectedFS := CephFilesystemDetails{ - ID: 1, - MDSMap: MDSMap{ - FilesystemName: "myfs1", - Enabled: true, - Root: 0, - TableServer: 0, - MaxMDS: 1, - MetadataPool: 2, - DataPools: []int{1}, - In: []int{0}, - Up: map[string]int{"mds_0": 4107}, - Failed: []int{}, - Damaged: []int{}, - Stopped: []int{}, - Info: map[string]MDSInfo{ - "gid_4107": { - GID: 4107, - Name: "1", - Rank: 0, - State: "up:active", - Address: "127.0.0.1:6804/2981621686", - }, - }, - }, - } - - assert.Equal(t, expectedFS, fs) -} - -func TestFilesystemRemove(t *testing.T) { - dataDeleted := false - metadataDeleted := false - crushDeleted := false - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - fs := CephFilesystemDetails{ - ID: 1, - MDSMap: MDSMap{ - FilesystemName: "myfs1", - MetadataPool: 2, - DataPools: []int{1}, - }, - } - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "get" { - output, 
err := json.Marshal(fs) - assert.Nil(t, err) - return string(output), nil - } - if args[1] == "rm" { - return "", nil - } - } - if args[0] == "osd" { - if args[1] == "lspools" { - pools := []*CephStoragePoolSummary{ - {Name: "mydata", Number: 1}, - {Name: "mymetadata", Number: 2}, - } - output, err := json.Marshal(pools) - assert.Nil(t, err) - return string(output), nil - } - if args[1] == "pool" { - if args[2] == "get" { - return `{"pool_id":1}`, nil - } - if args[2] == "delete" { - if args[3] == "mydata" { - dataDeleted = true - return "", nil - } - if args[3] == "mymetadata" { - metadataDeleted = true - return "", nil - } - } - } - if args[1] == "crush" { - assert.Equal(t, "rule", args[2]) - assert.Equal(t, "rm", args[3]) - crushDeleted = true - return "", nil - } - } - emptyPool := "{\"images\":{\"count\":0,\"provisioned_bytes\":0,\"snap_count\":0},\"trash\":{\"count\":1,\"provisioned_bytes\":2048,\"snap_count\":0}}" - if args[0] == "pool" { - if args[1] == "stats" { - return emptyPool, nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err := RemoveFilesystem(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, false) - assert.Nil(t, err) - assert.True(t, metadataDeleted) - assert.True(t, dataDeleted) - assert.True(t, crushDeleted) -} - -func TestFailAllStandbyReplayMDS(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - failedGids := make([]string, 0) - fs := CephFilesystemDetails{ - ID: 1, - MDSMap: MDSMap{ - FilesystemName: "myfs1", - MetadataPool: 2, - Up: map[string]int{ - "mds_0": 123, - }, - DataPools: []int{3}, - Info: map[string]MDSInfo{ - "gid_123": { - GID: 123, - State: "up:active", - Name: fmt.Sprintf("%s-%s", "myfs1", "a"), - }, - "gid_124": { - GID: 124, - State: "up:standby-replay", - Name: fmt.Sprintf("%s-%s", "myfs1", "b"), - }, - }, - }, - } - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "get" { - output, err := json.Marshal(fs) - assert.Nil(t, err) - return string(output), nil - } - if args[1] == "rm" { - return "", nil - } - } - if args[0] == "mds" { - if args[1] == "fail" { - failedGids = append(failedGids, args[2]) - return "", nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err := FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName) - assert.NoError(t, err) - assert.ElementsMatch(t, failedGids, []string{"124"}) - - fs = CephFilesystemDetails{ - ID: 1, - MDSMap: MDSMap{ - FilesystemName: "myfs1", - MetadataPool: 2, - Up: map[string]int{ - "mds_0": 123, - }, - DataPools: []int{3}, - Info: map[string]MDSInfo{ - "gid_123": { - GID: 123, - State: "up:active", - Name: fmt.Sprintf("%s-%s", "myfs1", "a"), - }, - "gid_124": { - GID: 124, - State: "up:standby", - Name: fmt.Sprintf("%s-%s", "myfs1", "b"), - }, - }, - }, - } - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "get" { - output, err := json.Marshal(fs) - assert.Nil(t, err) - return string(output), nil - } - if args[1] == "rm" { - return "", nil - } - } - if args[0] == "mds" { - if args[1] == "fail" { - return "", errors.Errorf("unexpected execution of mds fail") - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - err = 
FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName) - assert.NoError(t, err) - - fs = CephFilesystemDetails{ - ID: 1, - MDSMap: MDSMap{ - FilesystemName: "myfs1", - MetadataPool: 2, - Up: map[string]int{ - "mds_0": 123, - }, - DataPools: []int{3}, - Info: map[string]MDSInfo{ - "gid_123": { - GID: 123, - State: "up:active", - Name: fmt.Sprintf("%s-%s", "myfs1", "a"), - }, - "gid_124": { - GID: 124, - State: "up:standby-replay", - Name: fmt.Sprintf("%s-%s", "myfs1", "b"), - }, - }, - }, - } - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "get" { - output, err := json.Marshal(fs) - assert.Nil(t, err) - return string(output), nil - } - if args[1] == "rm" { - return "", nil - } - } - if args[0] == "mds" { - if args[1] == "fail" { - return "", errors.Errorf("expected execution of mds fail") - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - err = FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName) - assert.Error(t, err) - assert.Contains(t, err.Error(), "expected execution of mds fail") -} - -func TestGetMdsIdByRank(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - fs := CephFilesystemDetails{ - ID: 1, - MDSMap: MDSMap{ - FilesystemName: "myfs1", - MetadataPool: 2, - Up: map[string]int{ - "mds_0": 123, - }, - DataPools: []int{3}, - Info: map[string]MDSInfo{ - "gid_123": { - GID: 123, - State: "up:active", - Name: fmt.Sprintf("%s-%s", "myfs1", "a"), - }, - "gid_124": { - GID: 124, - State: "up:standby-replay", - Name: fmt.Sprintf("%s-%s", "myfs1", "b"), - }, - }, - }, - } - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "get" { - output, err := json.Marshal(fs) - assert.Nil(t, err) - return string(output), nil - } - if args[1] == "rm" { - return "", nil - } - } - if args[0] == "mds" { - if args[1] == "fail" { - return "", nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - name, err := GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) - assert.Equal(t, name, "myfs1-a") - assert.NoError(t, err) - - // test errors - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "get" { - return "", errors.Errorf("test ceph fs get error") - } - if args[1] == "rm" { - return "", nil - } - } - if args[0] == "mds" { - if args[1] == "fail" { - return "", nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) - assert.Equal(t, "", name) - assert.Error(t, err) - assert.Contains(t, err.Error(), "test ceph fs get error") - - fs = CephFilesystemDetails{ - ID: 1, - MDSMap: MDSMap{ - FilesystemName: "myfs1", - MetadataPool: 2, - Up: map[string]int{ - "mds_1": 123, - }, - DataPools: []int{3}, - Info: map[string]MDSInfo{ - "gid_123": { - GID: 123, - State: "up:active", - Name: fmt.Sprintf("%s-%s", "myfs1", "a"), - }, - "gid_124": { - GID: 124, - State: "up:standby-replay", - Name: fmt.Sprintf("%s-%s", "myfs1", "b"), - }, - }, - }, - } - // test get mds by id failed error - 
executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "get" { - output, err := json.Marshal(fs) - assert.Nil(t, err) - return string(output), nil - } - if args[1] == "rm" { - return "", nil - } - } - if args[0] == "mds" { - if args[1] == "fail" { - return "", nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) - assert.Equal(t, "", name) - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to get mds gid from rank 0") - - fs = CephFilesystemDetails{ - ID: 1, - MDSMap: MDSMap{ - FilesystemName: "myfs1", - MetadataPool: 2, - Up: map[string]int{ - "mds_0": 123, - }, - DataPools: []int{3}, - Info: map[string]MDSInfo{ - "gid_122": { - GID: 123, - State: "up:active", - Name: fmt.Sprintf("%s-%s", "myfs1", "a"), - }, - "gid_124": { - GID: 124, - State: "up:standby-replay", - Name: fmt.Sprintf("%s-%s", "myfs1", "b"), - }, - }, - }, - } - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "get" { - output, err := json.Marshal(fs) - assert.Nil(t, err) - return string(output), nil - } - if args[1] == "rm" { - return "", nil - } - } - if args[0] == "mds" { - if args[1] == "fail" { - return "", nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) - assert.Equal(t, "", name) - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to get mds info for rank 0") -} - -func TestGetMDSDump(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "dump" { - output := `{"epoch":12,"default_fscid":1,"compat":{"compat":{},"ro_compat":{},"incompat": - {"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs", - "feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap", - "feature_8":"no anchor table","feature_9":"file layout v2","feature_10":"snaprealm v2"}},"feature_flags": - {"enable_multiple":false,"ever_enabled_multiple":false},"standbys":[{"gid":26829,"name":"rook-ceph-filesystem-b","rank":-1,"incarnation":0,"state":"up:standby", - "state_seq":1,"addr":"10.110.29.245:6805/3170687682","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.245:6804","nonce":3170687682},{"type":"v1","addr":"10.110.29.245:6805","nonce":3170687682}]},"export_targets":[],"features":4611087854035861503,"flags":0,"epoch":12}],"filesystems":[{"mdsmap":{"epoch":11,"flags":18,"ever_allowed_features":32,"explicitly_allowed_features":32,"created":"2021-04-23 01:52:33.467863", - "modified":"2021-04-23 08:31:03.019621","tableserver":0,"root":0,"session_timeout":60,"session_autoclose":300,"min_compat_client":"-1 (unspecified)","max_file_size":1099511627776,"last_failure":0,"last_failure_osd_epoch":0,"compat":{"compat":{},"ro_compat":{},"incompat":{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs","feature_4":"dir inode in 
separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap","feature_8":"no anchor table","feature_9":"file layout v2", - "feature_10":"snaprealm v2"}},"max_mds":1,"in":[0],"up":{"mds_0":14707},"failed":[],"damaged":[],"stopped":[],"info":{"gid_14707":{"gid":14707,"name":"rook-ceph-filesystem-a","rank":0,"incarnation":5,"state":"up:active","state_seq":2,"addr":"10.110.29.236:6807/1996297745","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.236:6806","nonce":1996297745}, - {"type":"v1","addr":"10.110.29.236:6807","nonce":1996297745}]},"export_targets":[],"features":4611087854035861503,"flags":0}},"data_pools":[3],"metadata_pool":2,"enabled":true,"fs_name":"rook-ceph-filesystem","balancer":"","standby_count_wanted":1},"id":1}]}` - return output, nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - mdsDump, err := GetMDSDump(context, AdminClusterInfo("mycluster")) - assert.NoError(t, err) - assert.ElementsMatch(t, mdsDump.Standbys, []MDSStandBy{{Name: "rook-ceph-filesystem-b", Rank: -1}}) - - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "dump" { - return "", errors.Errorf("dump fs failed") - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - _, err = GetMDSDump(context, AdminClusterInfo("mycluster")) - assert.Error(t, err) -} - -func TestWaitForNoStandbys(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "dump" { - output := `{"epoch":12,"default_fscid":1,"compat":{"compat":{},"ro_compat":{},"incompat": - {"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs", - "feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap", - "feature_8":"no anchor table","feature_9":"file layout v2","feature_10":"snaprealm v2"}},"feature_flags": - {"enable_multiple":false,"ever_enabled_multiple":false},"standbys":[{"gid":26829,"name":"rook-ceph-filesystem-b","rank":-1,"incarnation":0,"state":"up:standby", - "state_seq":1,"addr":"10.110.29.245:6805/3170687682","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.245:6804","nonce":3170687682},{"type":"v1","addr":"10.110.29.245:6805","nonce":3170687682}]},"export_targets":[],"features":4611087854035861503,"flags":0,"epoch":12}],"filesystems":[{"mdsmap":{"epoch":11,"flags":18,"ever_allowed_features":32,"explicitly_allowed_features":32,"created":"2021-04-23 01:52:33.467863", - "modified":"2021-04-23 08:31:03.019621","tableserver":0,"root":0,"session_timeout":60,"session_autoclose":300,"min_compat_client":"-1 (unspecified)","max_file_size":1099511627776,"last_failure":0,"last_failure_osd_epoch":0,"compat":{"compat":{},"ro_compat":{},"incompat":{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs","feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap","feature_8":"no anchor table","feature_9":"file layout v2", - "feature_10":"snaprealm 
v2"}},"max_mds":1,"in":[0],"up":{"mds_0":14707},"failed":[],"damaged":[],"stopped":[],"info":{"gid_14707":{"gid":14707,"name":"rook-ceph-filesystem-a","rank":0,"incarnation":5,"state":"up:active","state_seq":2,"addr":"10.110.29.236:6807/1996297745","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.236:6806","nonce":1996297745}, - {"type":"v1","addr":"10.110.29.236:6807","nonce":1996297745}]},"export_targets":[],"features":4611087854035861503,"flags":0}},"data_pools":[3],"metadata_pool":2,"enabled":true,"fs_name":"rook-ceph-filesystem","balancer":"","standby_count_wanted":1},"id":1}]}` - return output, nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err := WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second) - assert.Error(t, err) - - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "dump" { - return "", errors.Errorf("failed to dump fs info") - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err = WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second) - assert.Error(t, err) - - firstCall := true - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "fs" { - if args[1] == "dump" { - if firstCall { - firstCall = false - output := `{"epoch":12,"default_fscid":1,"compat":{"compat":{},"ro_compat":{},"incompat": - {"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs", - "feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap", - "feature_8":"no anchor table","feature_9":"file layout v2","feature_10":"snaprealm v2"}},"feature_flags": - {"enable_multiple":false,"ever_enabled_multiple":false},"standbys":[{"gid":26829,"name":"rook-ceph-filesystem-b","rank":-1,"incarnation":0,"state":"up:standby", - "state_seq":1,"addr":"10.110.29.245:6805/3170687682","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.245:6804","nonce":3170687682},{"type":"v1","addr":"10.110.29.245:6805","nonce":3170687682}]},"export_targets":[],"features":4611087854035861503,"flags":0,"epoch":12}],"filesystems":[{"mdsmap":{"epoch":11,"flags":18,"ever_allowed_features":32,"explicitly_allowed_features":32,"created":"2021-04-23 01:52:33.467863", - "modified":"2021-04-23 08:31:03.019621","tableserver":0,"root":0,"session_timeout":60,"session_autoclose":300,"min_compat_client":"-1 (unspecified)","max_file_size":1099511627776,"last_failure":0,"last_failure_osd_epoch":0,"compat":{"compat":{},"ro_compat":{},"incompat":{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs","feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap","feature_8":"no anchor table","feature_9":"file layout v2", - "feature_10":"snaprealm v2"}},"max_mds":1,"in":[0],"up":{"mds_0":14707},"failed":[],"damaged":[],"stopped":[],"info":{"gid_14707":{"gid":14707,"name":"rook-ceph-filesystem-a","rank":0,"incarnation":5,"state":"up:active","state_seq":2,"addr":"10.110.29.236:6807/1996297745","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.236:6806","nonce":1996297745}, - 
{"type":"v1","addr":"10.110.29.236:6807","nonce":1996297745}]},"export_targets":[],"features":4611087854035861503,"flags":0}},"data_pools":[3],"metadata_pool":2,"enabled":true,"fs_name":"rook-ceph-filesystem","balancer":"","standby_count_wanted":1},"id":1}]}` - return output, nil - } - - return `{"standbys":[],"filesystemds":[]}`, nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - err = WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second) - assert.NoError(t, err) - -} diff --git a/pkg/daemon/ceph/client/image.go b/pkg/daemon/ceph/client/image.go deleted file mode 100644 index 3495aff78..000000000 --- a/pkg/daemon/ceph/client/image.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package client - -import ( - "encoding/json" - "fmt" - "syscall" - - "strconv" - - "regexp" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/util/display" - "github.com/rook/rook/pkg/util/exec" -) - -const ( - ImageMinSize = uint64(1048576) // 1 MB -) - -type CephBlockImage struct { - Name string `json:"image"` - Size uint64 `json:"size"` - Format int `json:"format"` - InfoName string `json:"name"` -} - -func ListImages(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) ([]CephBlockImage, error) { - args := []string{"ls", "-l", poolName} - cmd := NewRBDCommand(context, clusterInfo, args) - cmd.JsonOutput = true - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to list images for pool %s", poolName) - } - - //The regex expression captures the json result at the end buf - //When logLevel is DEBUG buf contains log statements of librados (see tests for examples) - //It can happen that the end of the "real" output doesn't not contain a new line - //that's why looking for the end isn't an option here (anymore?) - res := regexp.MustCompile(`(?m)^\[(.*)\]`).FindStringSubmatch(string(buf)) - if len(res) == 0 { - return []CephBlockImage{}, nil - } - buf = []byte(res[0]) - - var images []CephBlockImage - if err = json.Unmarshal(buf, &images); err != nil { - return nil, errors.Wrapf(err, "unmarshal failed, raw buffer response: %s", string(buf)) - } - - return images, nil -} - -// CreateImage creates a block storage image. -// If dataPoolName is not empty, the image will use poolName as the metadata pool and the dataPoolname for data. -// If size is zero an empty image will be created. Otherwise, an image will be -// created with a size rounded up to the nearest Mi. The adjusted image size is -// placed in return value CephBlockImage.Size. -func CreateImage(context *clusterd.Context, clusterInfo *ClusterInfo, name, poolName, dataPoolName string, size uint64) (*CephBlockImage, error) { - if size > 0 && size < ImageMinSize { - // rbd tool uses MB as the smallest unit for size input. 0 is OK but anything else smaller - // than 1 MB should just be rounded up to 1 MB. 
- logger.Warningf("requested image size %d is less than the minimum size of %d, using the minimum.", size, ImageMinSize) - size = ImageMinSize - } - - // Roundup the size of the volume image since we only create images on 1MB bundaries and we should never create an image - // size that's smaller than the requested one, e.g, requested 1048698 bytes should be 2MB while not be truncated to 1MB - sizeMB := int((size + ImageMinSize - 1) / ImageMinSize) - - imageSpec := getImageSpec(name, poolName) - - args := []string{"create", imageSpec, "--size", strconv.Itoa(sizeMB)} - - if dataPoolName != "" { - args = append(args, fmt.Sprintf("--data-pool=%s", dataPoolName)) - } - logger.Infof("creating rbd image %q with size %dMB in pool %q", imageSpec, sizeMB, dataPoolName) - - buf, err := NewRBDCommand(context, clusterInfo, args).Run() - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.EEXIST) { - // Image with the same name already exists in the given rbd pool. Continuing with the link to PV. - logger.Warningf("Requested image %s exists in pool %s. Continuing", name, poolName) - } else { - return nil, errors.Wrapf(err, "failed to create image %s in pool %s of size %d, output: %s", - name, poolName, size, string(buf)) - } - } - - // report the adjusted size which will always be >= to the requested size - var newSizeBytes uint64 - if sizeMB > 0 { - newSizeBytes = display.MbTob(uint64(sizeMB)) - } else { - newSizeBytes = 0 - } - - return &CephBlockImage{Name: name, Size: newSizeBytes}, nil -} - -func DeleteImage(context *clusterd.Context, clusterInfo *ClusterInfo, name, poolName string) error { - logger.Infof("deleting rbd image %q from pool %q", name, poolName) - imageSpec := getImageSpec(name, poolName) - args := []string{"rm", imageSpec} - buf, err := NewRBDCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to delete image %s in pool %s, output: %s", - name, poolName, string(buf)) - } - - return nil -} - -func ExpandImage(context *clusterd.Context, clusterInfo *ClusterInfo, name, poolName, monitors, keyring string, size uint64) error { - logger.Infof("expanding rbd image %q in pool %q to size %dMB", name, poolName, display.BToMb(size)) - imageSpec := getImageSpec(name, poolName) - args := []string{ - "resize", - imageSpec, - fmt.Sprintf("--size=%s", strconv.FormatUint(size, 10)), - fmt.Sprintf("--cluster=%s", clusterInfo.Namespace), - fmt.Sprintf("--keyring=%s", keyring), - "-m", monitors, - } - output, err := ExecuteRBDCommandWithTimeout(context, args) - if err != nil { - return errors.Wrapf(err, "failed to resize image %s in pool %s, output: %s", name, poolName, string(output)) - } - return nil -} - -// MapImage maps an RBD image using admin cephfx and returns the device path -func MapImage(context *clusterd.Context, clusterInfo *ClusterInfo, imageName, poolName, id, keyring, monitors string) error { - imageSpec := getImageSpec(imageName, poolName) - args := []string{ - "map", - imageSpec, - fmt.Sprintf("--id=%s", id), - fmt.Sprintf("--cluster=%s", clusterInfo.Namespace), - fmt.Sprintf("--keyring=%s", keyring), - "-m", monitors, - "--conf=/dev/null", // no config file needed because we are passing all required config as arguments - } - - output, err := ExecuteRBDCommandWithTimeout(context, args) - if err != nil { - return errors.Wrapf(err, "failed to map image %s, output: %s", imageSpec, output) - } - - return nil -} - -// UnMapImage unmap an RBD image from the node -func UnMapImage(context *clusterd.Context, clusterInfo 
*ClusterInfo, imageName, poolName, id, keyring, monitors string, force bool) error { - deviceImage := getImageSpec(imageName, poolName) - args := []string{ - "unmap", - deviceImage, - fmt.Sprintf("--id=%s", id), - fmt.Sprintf("--cluster=%s", clusterInfo.Namespace), - fmt.Sprintf("--keyring=%s", keyring), - "-m", monitors, - "--conf=/dev/null", // no config file needed because we are passing all required config as arguments - } - - if force { - args = append(args, "-o", "force") - } - - output, err := ExecuteRBDCommandWithTimeout(context, args) - if err != nil { - return errors.Wrapf(err, "failed to unmap image %s, output: %s", deviceImage, output) - } - - return nil -} - -func getImageSpec(name, poolName string) string { - return fmt.Sprintf("%s/%s", poolName, name) -} diff --git a/pkg/daemon/ceph/client/image_test.go b/pkg/daemon/ceph/client/image_test.go deleted file mode 100644 index 3a51acc5a..000000000 --- a/pkg/daemon/ceph/client/image_test.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package client - -import ( - "fmt" - "testing" - "time" - - "strings" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -const ( - sizeMB = 1048576 // 1 MB -) - -func TestCreateImage(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - - // mock an error during the create image call. rbd tool returns error information to the output stream, - // separate from the error object, so verify that information also makes it back to us (because it is useful). 
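// The mock below returns detailed text on the output stream together with a separate error
// object; CreateImage folds that output into the returned error via errors.Wrapf, which is
// what the strings.Contains assertion then checks. A minimal sketch of that wrap pattern,
// where runRBD is a made-up stand-in for the executor call rather than a Rook function:
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// runRBD stands in for an rbd invocation that fails but still prints useful diagnostics.
func runRBD() (string, error) {
	return "mocked detailed ceph error output stream", errors.New("some mocked error")
}

func main() {
	out, err := runRBD()
	if err != nil {
		// Wrapping keeps both the Go error and the tool's own output visible to the caller.
		err = errors.Wrapf(err, "failed to create image, output: %s", out)
	}
	fmt.Println(err) // failed to create image, output: mocked detailed ceph error output stream: some mocked error
}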
- executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - switch { - case command == "rbd" && args[0] == "create": - return "mocked detailed ceph error output stream", errors.New("some mocked error") - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - clusterInfo := AdminClusterInfo("mycluster") - _, err := CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB)) // 1MB - assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), "mocked detailed ceph error output stream")) - - // rbd tool interprets sizes as MB, so anything smaller than that should get rounded up to the minimum - // (except for 0, that's OK) - createCalled := false - expectedSizeArg := "" - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - switch { - case command == "rbd" && args[0] == "create": - createCalled = true - assert.Equal(t, expectedSizeArg, args[3]) - return "", nil - case command == "rbd" && args[0] == "info": - assert.Equal(t, "pool1/image1", args[1]) - return `{"name":"image1","size":1048576,"objects":1,"order":20,"object_size":1048576,"block_name_prefix":"pool1_data.229226b8b4567",` + - `"format":2,"features":["layering"],"op_features":[],"flags":[],"create_timestamp":"Fri Oct 5 19:46:20 2018"}`, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - // 0 byte --> 0 MB - expectedSizeArg = "0" - image, err := CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(0)) - assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - createCalled = false - - // 1 byte --> 1 MB - expectedSizeArg = "1" - image, err = CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(1)) - assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - createCalled = false - - // (1 MB - 1 byte) --> 1 MB - expectedSizeArg = "1" - image, err = CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB-1)) - assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - createCalled = false - - // 1 MB - expectedSizeArg = "1" - image, err = CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB)) - assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - assert.Equal(t, "image1", image.Name) - assert.Equal(t, uint64(sizeMB), image.Size) - createCalled = false - - // (1 MB + 1 byte) --> 2 MB - expectedSizeArg = "2" - image, err = CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB+1)) - assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - createCalled = false - - // (2 MB - 1 byte) --> 2 MB - expectedSizeArg = "2" - image, err = CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB*2-1)) - assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - createCalled = false - - // 2 MB - expectedSizeArg = "2" - image, err = CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB*2)) - assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - createCalled = false - - // (2 MB + 1 byte) --> 3MB - expectedSizeArg = "3" - image, err = CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB*2+1)) - assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - createCalled = false - - // Pool with data pool - expectedSizeArg = "1" - image, err = CreateImage(context, clusterInfo, "image1", "pool1", "datapool1", uint64(sizeMB)) - 
assert.Nil(t, err) - assert.NotNil(t, image) - assert.True(t, createCalled) - createCalled = false -} - -func TestExpandImage(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithTimeout = func(timeout time.Duration, command string, args ...string) (string, error) { - switch { - case args[1] != "kube/some-image": - return "", errors.Errorf("no image %s", args[1]) - - case command == "rbd" && args[0] == "resize": - return "everything is okay", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - clusterInfo := AdminClusterInfo("mycluster") - err := ExpandImage(context, clusterInfo, "error-name", "kube", "mon1,mon2,mon3", "/tmp/keyring", 1000000) - assert.Error(t, err) - - err = ExpandImage(context, clusterInfo, "some-image", "kube", "mon1,mon2,mon3", "/tmp/keyring", 1000000) - assert.NoError(t, err) -} - -func TestListImageLogLevelInfo(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - - var images []CephBlockImage - var err error - listCalled := false - emptyListResult := false - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - switch { - case command == "rbd" && args[0] == "ls" && args[1] == "-l": - listCalled = true - if emptyListResult { - return `[]`, nil - } else { - return `[{"image":"image1","size":1048576,"format":2},{"image":"image2","size":2048576,"format":2},{"image":"image3","size":3048576,"format":2}]`, nil - - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - clusterInfo := AdminClusterInfo("mycluster") - images, err = ListImages(context, clusterInfo, "pool1") - assert.Nil(t, err) - assert.NotNil(t, images) - assert.True(t, len(images) == 3) - assert.True(t, listCalled) - listCalled = false - - emptyListResult = true - images, err = ListImages(context, clusterInfo, "pool1") - assert.Nil(t, err) - assert.NotNil(t, images) - assert.True(t, len(images) == 0) - assert.True(t, listCalled) - listCalled = false -} - -func TestListImageLogLevelDebug(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - - var images []CephBlockImage - var err error - libradosDebugOut := `2017-08-24 19:42:10.693348 7fd64513e0c0 1 librados: starting msgr at - -2017-08-24 19:42:10.693372 7fd64513e0c0 1 librados: starting objecter -2017-08-24 19:42:10.784686 7fd64513e0c0 1 librados: setting wanted keys -2017-08-24 19:42:10.784688 7fd64513e0c0 1 librados: calling monclient init -2017-08-24 19:42:10.789337 7fd64513e0c0 1 librados: init done -2017-08-24 19:42:10.789354 7fd64513e0c0 10 librados: wait_for_osdmap waiting -2017-08-24 19:42:10.790039 7fd64513e0c0 10 librados: wait_for_osdmap done waiting -2017-08-24 19:42:10.790079 7fd64513e0c0 10 librados: read oid=rbd_directory nspace= -2017-08-24 19:42:10.792235 7fd64513e0c0 10 librados: Objecter returned from read r=0 -2017-08-24 19:42:10.792307 7fd64513e0c0 10 librados: call oid=rbd_directory nspace= -2017-08-24 19:42:10.793495 7fd64513e0c0 10 librados: Objecter returned from call r=0 -2017-08-24 19:42:11.684960 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:11.884609 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:11.884628 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:11.985068 7fd621ffb700 10 librados: set snap write context: seq = 0 
and snaps = [] -2017-08-24 19:42:11.985084 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:11.986275 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:11.986339 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:11.986498 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:11.987363 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:11.988165 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:12.385448 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:12.386804 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -2017-08-24 19:42:12.386877 7fd621ffb700 10 librados: set snap write context: seq = 0 and snaps = [] -` - - listCalled := false - emptyListResult := false - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - switch { - case command == "rbd" && args[0] == "ls" && args[1] == "-l": - listCalled = true - if emptyListResult { - return fmt.Sprintf(`%s[]`, libradosDebugOut), nil - } else { - return fmt.Sprintf(`%s[{"image":"image1","size":1048576,"format":2},{"image":"image2","size":2048576,"format":2},{"image":"image3","size":3048576,"format":2}]`, libradosDebugOut), nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - clusterInfo := AdminClusterInfo("mycluster") - images, err = ListImages(context, clusterInfo, "pool1") - assert.Nil(t, err) - assert.NotNil(t, images) - assert.True(t, len(images) == 3) - assert.True(t, listCalled) - listCalled = false - - emptyListResult = true - images, err = ListImages(context, clusterInfo, "pool1") - assert.Nil(t, err) - assert.NotNil(t, images) - assert.True(t, len(images) == 0) - assert.True(t, listCalled) - listCalled = false -} diff --git a/pkg/daemon/ceph/client/info.go b/pkg/daemon/ceph/client/info.go deleted file mode 100644 index 6c5ab5f52..000000000 --- a/pkg/daemon/ceph/client/info.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "fmt" - "net" - "testing" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -// ClusterInfo is a collection of information about a particular Ceph cluster. Rook uses information -// about the cluster to configure daemons to connect to the desired cluster. 
-type ClusterInfo struct { - FSID string - MonitorSecret string - CephCred CephCred - Monitors map[string]*MonInfo - CephVersion cephver.CephVersion - Namespace string - OwnerInfo *k8sutil.OwnerInfo - // Hide the name of the cluster since in 99% of uses we want to use the cluster namespace. - // If the CR name is needed, access it through the NamespacedName() method. - name string - OsdUpgradeTimeout time.Duration - NetworkSpec cephv1.NetworkSpec -} - -// MonInfo is a collection of information about a Ceph mon. -type MonInfo struct { - Name string `json:"name"` - Endpoint string `json:"endpoint"` -} - -// CephCred represents the Ceph cluster username and key used by the operator. -// For converged clusters it will be the admin key, but external clusters will have a -// lower-privileged key. -type CephCred struct { - Username string `json:"name"` - Secret string `json:"secret"` -} - -func NewClusterInfo(namespace, name string) *ClusterInfo { - return &ClusterInfo{Namespace: namespace, name: name} -} - -func (c *ClusterInfo) SetName(name string) { - c.name = name -} - -func (c *ClusterInfo) NamespacedName() types.NamespacedName { - if c.name == "" { - panic("name is not set on the clusterInfo") - } - return types.NamespacedName{Namespace: c.Namespace, Name: c.name} -} - -// AdminClusterInfo() creates a ClusterInfo with the basic info to access the cluster -// as an admin. Only a few fields are set in the struct, -// so this clusterInfo cannot be used to generate the mon config or request the -// namespacedName. A full cluster info must be populated for those operations. -func AdminClusterInfo(namespace string) *ClusterInfo { - ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(&metav1.OwnerReference{}, "") - return &ClusterInfo{ - Namespace: namespace, - CephCred: CephCred{ - Username: AdminUsername, - }, - name: "testing", - OwnerInfo: ownerInfo, - } -} - -// IsInitialized returns true if the critical information in the ClusterInfo struct has been filled -// in. This method exists less out of necessity than the desire to be explicit about the lifecycle -// of the ClusterInfo struct during startup, specifically that it is expected to exist after the -// Rook operator has started up or connected to the first components of the Ceph cluster. -func (c *ClusterInfo) IsInitialized(logError bool) bool { - var isInitialized bool - - if c == nil { - if logError { - logger.Error("clusterInfo is nil") - } - } else if c.FSID == "" { - if logError { - logger.Error("cluster fsid is empty") - } - } else if c.MonitorSecret == "" { - if logError { - logger.Error("monitor secret is empty") - } - } else if c.CephCred.Username == "" { - if logError { - logger.Error("ceph username is empty") - } - } else if c.CephCred.Secret == "" { - if logError { - logger.Error("ceph secret is empty") - } - } else { - isInitialized = true - } - - return isInitialized -} - -// NewMonInfo returns a new Ceph mon info struct from the given inputs. 
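// NewMonInfo (below) builds the mon endpoint with net.JoinHostPort rather than plain string
// concatenation, so IPv6 literals are bracketed correctly. A standalone sketch with made-up
// addresses:
package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("10.110.29.245", "6789")) // 10.110.29.245:6789
	fmt.Println(net.JoinHostPort("fd00::10", "6789"))      // [fd00::10]:6789
}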
-func NewMonInfo(name, ip string, port int32) *MonInfo { - return &MonInfo{Name: name, Endpoint: net.JoinHostPort(ip, fmt.Sprintf("%d", port))} -} - -func NewMinimumOwnerInfo(t *testing.T) *k8sutil.OwnerInfo { - cluster := &cephv1.CephCluster{} - scheme := runtime.NewScheme() - err := cephv1.AddToScheme(scheme) - assert.NoError(t, err) - return k8sutil.NewOwnerInfo(cluster, scheme) -} - -func NewMinimumOwnerInfoWithOwnerRef() *k8sutil.OwnerInfo { - return k8sutil.NewOwnerInfoWithOwnerRef(&metav1.OwnerReference{}, "") -} diff --git a/pkg/daemon/ceph/client/keyring.go b/pkg/daemon/ceph/client/keyring.go deleted file mode 100644 index fdda15f20..000000000 --- a/pkg/daemon/ceph/client/keyring.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "encoding/base64" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" -) - -const ( - // AdminKeyringTemplate is a string template of Ceph keyring settings which allow connection - // as admin. The key value must be filled in by the admin auth key for the cluster. - AdminKeyringTemplate = ` -[client.admin] - key = %s - caps mds = "allow *" - caps mon = "allow *" - caps osd = "allow *" - caps mgr = "allow *" -` - - // UserKeyringTemplate is a string template of Ceph keyring settings which allow connection. - UserKeyringTemplate = ` -[%s] - key = %s -` -) - -// CephKeyring returns the filled-out user keyring -func CephKeyring(cred CephCred) string { - if cred.Username == AdminUsername { - return fmt.Sprintf(AdminKeyringTemplate, cred.Secret) - } - return fmt.Sprintf(UserKeyringTemplate, cred.Username, cred.Secret) -} - -// WriteKeyring calls the generate contents function with auth key as an argument then saves the -// output of the generateContents function to disk at the keyring path -// TODO: Kludgey; can keyring files be generated w/ go-ini package or using the '-o' option to -// 'ceph auth get-or-create ...'? 
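// A sketch of what CephKeyring (above) renders for the admin credential, using an obviously
// fake key; the template mirrors AdminKeyringTemplate from this file, and the resulting text
// is what WriteKeyring persists to disk with 0600 permissions.
package main

import "fmt"

const adminKeyringTemplate = `
[client.admin]
	key = %s
	caps mds = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"
	caps mgr = "allow *"
`

func main() {
	fmt.Printf(adminKeyringTemplate, "AQD0fakeEXAMPLEkey==") // made-up secret for illustration only
}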
-func WriteKeyring(keyringPath, authKey string, generateContents func(string) string) error { - contents := generateContents(authKey) - return writeKeyring(contents, keyringPath) -} - -// CreateKeyring creates a keyring for access to the cluster with the desired set of privileges -// and writes it to disk at the keyring path -func CreateKeyring(context *clusterd.Context, clusterInfo *ClusterInfo, username, keyringPath string, access []string, generateContents func(string) string) error { - _, err := os.Stat(keyringPath) - if err == nil { - // no error, the file exists, bail out with no error - logger.Debugf("keyring already exists at %s", keyringPath) - return nil - } else if !os.IsNotExist(err) { - // some other error besides "does not exist", bail out with error - return errors.Wrapf(err, "failed to stat %s", keyringPath) - } - - // get-or-create-key for the user account - key, err := AuthGetOrCreateKey(context, clusterInfo, username, access) - if err != nil { - return errors.Wrapf(err, "failed to get or create auth key for %s", username) - } - - return WriteKeyring(keyringPath, key, generateContents) -} - -// writes the keyring to disk -// TODO: Write keyring only to the default ceph config location since we are in a container -func writeKeyring(keyring, keyringPath string) error { - // save the keyring to the given path - if err := os.MkdirAll(filepath.Dir(keyringPath), 0700); err != nil { - return errors.Wrapf(err, "failed to create keyring directory for %s", keyringPath) - } - if err := ioutil.WriteFile(keyringPath, []byte(keyring), 0600); err != nil { - return errors.Wrapf(err, "failed to write monitor keyring to %s", keyringPath) - } - return nil -} - -// IsKeyringBase64Encoded returns whether the keyring is valid -func IsKeyringBase64Encoded(keyring string) bool { - // If the keyring is not base64 we fail - _, err := base64.StdEncoding.DecodeString(keyring) - if err != nil { - logger.Errorf("key is not base64 encoded. %v", err) - return false - } - - return true -} diff --git a/pkg/daemon/ceph/client/mgr.go b/pkg/daemon/ceph/client/mgr.go deleted file mode 100755 index 9fca665d1..000000000 --- a/pkg/daemon/ceph/client/mgr.go +++ /dev/null @@ -1,186 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "encoding/json" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" -) - -var ( - moduleEnableWaitTime = 5 * time.Second -) - -func CephMgrMap(context *clusterd.Context, clusterInfo *ClusterInfo) (*MgrMap, error) { - args := []string{"mgr", "dump"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - if len(buf) > 0 { - return nil, errors.Wrapf(err, "failed to get mgr dump. 
%s", string(buf)) - } - return nil, errors.Wrap(err, "failed to get mgr dump") - } - - var mgrMap MgrMap - if err := json.Unmarshal([]byte(buf), &mgrMap); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal mgr dump") - } - - return &mgrMap, nil -} - -func CephMgrStat(context *clusterd.Context, clusterInfo *ClusterInfo) (*MgrStat, error) { - args := []string{"mgr", "stat"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - if len(buf) > 0 { - return nil, errors.Wrapf(err, "failed to get mgr stat. %s", string(buf)) - } - return nil, errors.Wrap(err, "failed to get mgr stat") - } - - var mgrStat MgrStat - if err := json.Unmarshal([]byte(buf), &mgrStat); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal mgr stat") - } - - return &mgrStat, nil -} - -// MgrEnableModule enables a mgr module -func MgrEnableModule(context *clusterd.Context, clusterInfo *ClusterInfo, name string, force bool) error { - retryCount := 5 - var err error - for i := 0; i < retryCount; i++ { - /* In Pacific the balancer is now on by default in upmap mode. - In earlier versions, the balancer was included in the ``always_on_modules`` list, but needed to be - turned on explicitly using the ``ceph balancer on`` command. */ - if name == "balancer" && clusterInfo.CephVersion.IsAtLeastPacific() { - logger.Debug("balancer module is already 'on' on pacific, doing nothing", name) - return nil - } else if name == "balancer" { - err = enableDisableBalancerModule(context, clusterInfo, "on") - } else { - err = enableModule(context, clusterInfo, name, force, "enable") - } - if err != nil { - if i < retryCount-1 { - logger.Warningf("failed to enable mgr module %q. trying again...", name) - time.Sleep(moduleEnableWaitTime) - continue - } else { - return errors.Wrapf(err, "failed to enable mgr module %q even after %d retries", name, retryCount) - } - } - break - } - return nil -} - -// MgrDisableModule disables a mgr module -func MgrDisableModule(context *clusterd.Context, clusterInfo *ClusterInfo, name string) error { - if name == "balancer" { - return enableDisableBalancerModule(context, clusterInfo, "off") - } - return enableModule(context, clusterInfo, name, false, "disable") -} - -func enableModule(context *clusterd.Context, clusterInfo *ClusterInfo, name string, force bool, action string) error { - args := []string{"mgr", "module", action, name} - if force { - args = append(args, "--force") - } - - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to enable mgr module %q", name) - } - - return nil -} - -// enableDisableBalancerModule enables the ceph balancer module -func enableDisableBalancerModule(context *clusterd.Context, clusterInfo *ClusterInfo, action string) error { - args := []string{"balancer", action} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to turn %q the balancer module", action) - } - - return nil -} - -func setBalancerMode(context *clusterd.Context, clusterInfo *ClusterInfo, mode string) error { - args := []string{"balancer", "mode", mode} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to set balancer mode %q", mode) - } - - return nil -} - -// setMinCompatClientLuminous set the minimum compatibility for clients to Luminous -func setMinCompatClientLuminous(context *clusterd.Context, clusterInfo *ClusterInfo) error { - args := []string{"osd", 
"set-require-min-compat-client", "luminous", "--yes-i-really-mean-it"} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrap(err, "failed to set set-require-min-compat-client to luminous") - } - - return nil -} - -// mgrSetBalancerMode sets the given mode to the balancer module -func mgrSetBalancerMode(context *clusterd.Context, clusterInfo *ClusterInfo, balancerModuleMode string) error { - retryCount := 5 - for i := 0; i < retryCount; i++ { - err := setBalancerMode(context, clusterInfo, balancerModuleMode) - if err != nil { - if i < retryCount-1 { - logger.Warningf("failed to set mgr module mode %q. trying again...", balancerModuleMode) - time.Sleep(moduleEnableWaitTime) - continue - } else { - return errors.Wrapf(err, "failed to set mgr module mode %q even after %d retries", balancerModuleMode, retryCount) - } - } - break - } - - return nil -} - -// ConfigureBalancerModule configures the balancer module -func ConfigureBalancerModule(context *clusterd.Context, clusterInfo *ClusterInfo, balancerModuleMode string) error { - // Set min compat client to luminous before enabling the balancer mode "upmap" - err := setMinCompatClientLuminous(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to set minimum compatibility client") - } - - // Set balancer module mode - err = mgrSetBalancerMode(context, clusterInfo, balancerModuleMode) - if err != nil { - return errors.Wrapf(err, "failed to set balancer module mode to %q", balancerModuleMode) - } - - return nil -} diff --git a/pkg/daemon/ceph/client/mgr_test.go b/pkg/daemon/ceph/client/mgr_test.go deleted file mode 100644 index 95fe6ff17..000000000 --- a/pkg/daemon/ceph/client/mgr_test.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/version" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestEnableModuleRetries(t *testing.T) { - moduleEnableRetries := 0 - moduleEnableWaitTime = 0 - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "balancer" && args[1] == "on": - return "", nil - - case args[0] == "mgr" && args[1] == "module" && args[2] == "enable": - if args[3] == "prometheus" || args[3] == "pg_autoscaler" || args[3] == "crash" { - return "", nil - } - - case args[0] == "mgr" && args[1] == "module" && args[2] == "disable": - if args[3] == "prometheus" || args[3] == "pg_autoscaler" || args[3] == "crash" { - return "", nil - } - } - - moduleEnableRetries = moduleEnableRetries + 1 - return "", errors.Errorf("unexpected ceph command %q", args) - - } - - clusterInfo := AdminClusterInfo("mycluster") - _ = MgrEnableModule(&clusterd.Context{Executor: executor}, clusterInfo, "invalidModuleName", false) - assert.Equal(t, 5, moduleEnableRetries) - - moduleEnableRetries = 0 - _ = MgrEnableModule(&clusterd.Context{Executor: executor}, clusterInfo, "pg_autoscaler", false) - assert.Equal(t, 0, moduleEnableRetries) - - // Balancer not on Ceph Pacific - moduleEnableRetries = 0 - _ = MgrEnableModule(&clusterd.Context{Executor: executor}, clusterInfo, "balancer", false) - assert.Equal(t, 0, moduleEnableRetries) - - // Balancer skipped on Pacific - clusterInfo.CephVersion = version.Pacific - _ = MgrEnableModule(&clusterd.Context{Executor: executor}, clusterInfo, "balancer", false) - assert.Equal(t, 0, moduleEnableRetries) - -} - -func TestEnableModule(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "mgr" && args[1] == "module" && args[2] == "enable": - if args[3] == "prometheus" || args[3] == "pg_autoscaler" || args[3] == "crash" { - return "", nil - } - - case args[0] == "mgr" && args[1] == "module" && args[2] == "disable": - if args[3] == "prometheus" || args[3] == "pg_autoscaler" || args[3] == "crash" { - return "", nil - } - } - - return "", errors.Errorf("unexpected ceph command %q", args) - } - - clusterInfo := AdminClusterInfo("mycluster") - err := enableModule(&clusterd.Context{Executor: executor}, clusterInfo, "pg_autoscaler", true, "enable") - assert.NoError(t, err) - - err = enableModule(&clusterd.Context{Executor: executor}, clusterInfo, "prometheus", true, "disable") - assert.NoError(t, err) - - err = enableModule(&clusterd.Context{Executor: executor}, clusterInfo, "invalidModuleName", false, "enable") - assert.Error(t, err) - - err = enableModule(&clusterd.Context{Executor: executor}, clusterInfo, "pg_autoscaler", false, "invalidCommandArgs") - assert.Error(t, err) -} - -func TestEnableDisableBalancerModule(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "balancer" && args[1] == "on": - return "", nil - - case args[0] == "balancer" && args[1] == "off": - return "", nil - - } - - return "", 
errors.Errorf("unexpected ceph command %q", args) - } - - clusterInfo := AdminClusterInfo("mycluster") - err := enableDisableBalancerModule(&clusterd.Context{Executor: executor}, clusterInfo, "on") - assert.NoError(t, err) - - err = enableDisableBalancerModule(&clusterd.Context{Executor: executor}, clusterInfo, "off") - assert.NoError(t, err) -} - -func TestSetBalancerMode(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "balancer" && args[1] == "mode" && args[2] == "upmap" { - return "", nil - } - - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err := setBalancerMode(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "upmap") - assert.NoError(t, err) -} diff --git a/pkg/daemon/ceph/client/mirror.go b/pkg/daemon/ceph/client/mirror.go deleted file mode 100644 index c0630dc46..000000000 --- a/pkg/daemon/ceph/client/mirror.go +++ /dev/null @@ -1,386 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "k8s.io/apimachinery/pkg/util/sets" -) - -// PeerToken is the content of the peer token -type PeerToken struct { - ClusterFSID string `json:"fsid"` - ClientID string `json:"client_id"` - Key string `json:"key"` - MonHost string `json:"mon_host"` - // These fields are added by Rook and NOT part of the output of client.CreateRBDMirrorBootstrapPeer() - Namespace string `json:"namespace"` -} - -var ( - rbdMirrorPeerCaps = []string{"mon", "profile rbd-mirror-peer", "osd", "profile rbd"} - rbdMirrorPeerKeyringID = "rbd-mirror-peer" -) - -// ImportRBDMirrorBootstrapPeer add a mirror peer in the rbd-mirror configuration -func ImportRBDMirrorBootstrapPeer(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string, direction string, token []byte) error { - logger.Infof("add rbd-mirror bootstrap peer token for pool %q", poolName) - - // Token file - tokenFilePattern := fmt.Sprintf("rbd-mirror-token-%s", poolName) - tokenFilePath, err := ioutil.TempFile("/tmp", tokenFilePattern) - if err != nil { - return errors.Wrapf(err, "failed to create temporary token file for pool %q", poolName) - } - - // Write token into a file - err = ioutil.WriteFile(tokenFilePath.Name(), token, 0400) - if err != nil { - return errors.Wrapf(err, "failed to write token to file %q", tokenFilePath.Name()) - } - - // Remove token once we exit, we don't need it anymore - defer func() error { - err := os.Remove(tokenFilePath.Name()) - return err - }() //nolint // we don't want to return here - - // Build command - args := []string{"mirror", "pool", "peer", "bootstrap", "import", poolName, tokenFilePath.Name()} - if direction != "" { - args = 
append(args, "--direction", direction) - } - cmd := NewRBDCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to add rbd-mirror peer token for pool %q. %s", poolName, output) - } - - logger.Infof("successfully added rbd-mirror peer token for pool %q", poolName) - return nil -} - -// CreateRBDMirrorBootstrapPeer add a mirror peer in the rbd-mirror configuration -func CreateRBDMirrorBootstrapPeer(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) ([]byte, error) { - logger.Infof("create rbd-mirror bootstrap peer token for pool %q", poolName) - - // Build command - args := []string{"mirror", "pool", "peer", "bootstrap", "create", poolName} - cmd := NewRBDCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to create rbd-mirror peer token for pool %q. %s", poolName, output) - } - - logger.Infof("successfully created rbd-mirror bootstrap peer token for pool %q", poolName) - return output, nil -} - -// enablePoolMirroring turns on mirroring on that pool by specifying the mirroring type -func enablePoolMirroring(context *clusterd.Context, clusterInfo *ClusterInfo, pool cephv1.PoolSpec, poolName string) error { - logger.Infof("enabling mirroring type %q for pool %q", pool.Mirroring.Mode, poolName) - - // Build command - args := []string{"mirror", "pool", "enable", poolName, pool.Mirroring.Mode} - cmd := NewRBDCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to enable mirroring type %q for pool %q. %s", pool.Mirroring.Mode, poolName, output) - } - - return nil -} - -// disablePoolMirroring turns off mirroring on a pool -func disablePoolMirroring(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) error { - logger.Infof("disabling mirroring for pool %q", poolName) - - // Build command - args := []string{"mirror", "pool", "disable", poolName} - cmd := NewRBDCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to disable mirroring for pool %q. %s", poolName, output) - } - - return nil -} - -func removeClusterPeer(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, peerUUID string) error { - logger.Infof("removing cluster peer with UUID %q for the pool %q", peerUUID, poolName) - - // Build command - args := []string{"mirror", "pool", "peer", "remove", poolName, peerUUID} - cmd := NewRBDCommand(context, clusterInfo, args) - - // Run command - output, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to remove cluster peer with UUID %q for the pool %q. 
%s", peerUUID, poolName, output) - } - - return nil -} - -// GetPoolMirroringStatus prints the pool mirroring status -func GetPoolMirroringStatus(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) (*cephv1.PoolMirroringStatus, error) { - logger.Debugf("retrieving mirroring pool %q status", poolName) - - // Build command - args := []string{"mirror", "pool", "status", poolName} - cmd := NewRBDCommand(context, clusterInfo, args) - cmd.JsonOutput = true - - // Run command - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve mirroring pool %q status", poolName) - } - - var poolMirroringStatus cephv1.PoolMirroringStatus - if err := json.Unmarshal([]byte(buf), &poolMirroringStatus); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal mirror pool status response") - } - - return &poolMirroringStatus, nil -} - -// GetPoolMirroringInfo prints the pool mirroring information -func GetPoolMirroringInfo(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) (*cephv1.PoolMirroringInfo, error) { - logger.Debugf("retrieving mirroring pool %q info", poolName) - - // Build command - args := []string{"mirror", "pool", "info", poolName} - cmd := NewRBDCommand(context, clusterInfo, args) - cmd.JsonOutput = true - - // Run command - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve mirroring pool %q info. %s", poolName, string(buf)) - } - - // Unmarshal JSON into Go struct - var poolMirroringInfo cephv1.PoolMirroringInfo - if err := json.Unmarshal(buf, &poolMirroringInfo); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal mirror pool info response") - } - - return &poolMirroringInfo, nil -} - -// enableSnapshotSchedule configures the snapshots schedule on a mirrored pool -func enableSnapshotSchedule(context *clusterd.Context, clusterInfo *ClusterInfo, snapSpec cephv1.SnapshotScheduleSpec, poolName string) error { - logger.Infof("enabling snapshot schedule for pool %q", poolName) - - // Build command - args := []string{"mirror", "snapshot", "schedule", "add", "--pool", poolName, snapSpec.Interval} - - // If a start time is defined let's add it - if snapSpec.StartTime != "" { - args = append(args, snapSpec.StartTime) - } - cmd := NewRBDCommand(context, clusterInfo, args) - - // Run command - buf, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to enable snapshot schedule on pool %q. %s", poolName, string(buf)) - } - - logger.Infof("successfully enabled snapshot schedule for pool %q every %q", poolName, snapSpec.Interval) - return nil -} - -// removeSnapshotSchedule removes the snapshots schedule on a mirrored pool -func removeSnapshotSchedule(context *clusterd.Context, clusterInfo *ClusterInfo, snapScheduleResponse cephv1.SnapshotSchedule, poolName string) error { - logger.Debugf("removing snapshot schedule for pool %q (before adding new ones)", poolName) - - // Build command - args := []string{"mirror", "snapshot", "schedule", "remove", "--pool", poolName, snapScheduleResponse.Interval} - - // If a start time is defined let's add it - if snapScheduleResponse.StartTime != "" { - args = append(args, snapScheduleResponse.StartTime) - } - cmd := NewRBDCommand(context, clusterInfo, args) - - // Run command - buf, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to remove snapshot schedule on pool %q. 
%s", poolName, string(buf)) - } - - logger.Infof("successfully removed snapshot schedule %q for pool %q", poolName, snapScheduleResponse.Interval) - return nil -} - -func enableSnapshotSchedules(context *clusterd.Context, clusterInfo *ClusterInfo, poolSpec cephv1.PoolSpec, poolName string) error { - logger.Info("resetting current snapshot schedules") - // Reset any existing schedules - err := removeSnapshotSchedules(context, clusterInfo, poolSpec, poolName) - if err != nil { - logger.Errorf("failed to remove snapshot schedules. %v", err) - } - - // Enable all the snap schedules - for _, snapSchedule := range poolSpec.Mirroring.SnapshotSchedules { - err := enableSnapshotSchedule(context, clusterInfo, snapSchedule, poolName) - if err != nil { - return errors.Wrap(err, "failed to enable snapshot schedule") - } - } - - return nil -} - -// removeSnapshotSchedules removes all the existing snapshot schedules -func removeSnapshotSchedules(context *clusterd.Context, clusterInfo *ClusterInfo, poolSpec cephv1.PoolSpec, poolName string) error { - // Get the list of existing snapshot schedule - existingSnapshotSchedules, err := listSnapshotSchedules(context, clusterInfo, poolName) - if err != nil { - return errors.Wrap(err, "failed to list snapshot schedule(s)") - } - - // Remove each schedule - for _, existingSnapshotSchedule := range existingSnapshotSchedules { - err := removeSnapshotSchedule(context, clusterInfo, existingSnapshotSchedule, poolName) - if err != nil { - return errors.Wrapf(err, "failed to remove snapshot schedule %v", existingSnapshotSchedule) - } - } - - return nil -} - -// listSnapshotSchedules configures the snapshots schedule on a mirrored pool -func listSnapshotSchedules(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) ([]cephv1.SnapshotSchedule, error) { - // Build command - args := []string{"mirror", "snapshot", "schedule", "ls", "--pool", poolName} - cmd := NewRBDCommand(context, clusterInfo, args) - cmd.JsonOutput = true - - // Run command - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve snapshot schedules on pool %q. %s", poolName, string(buf)) - } - - // Unmarshal JSON into Go struct - var snapshotSchedules []cephv1.SnapshotSchedule - if err := json.Unmarshal([]byte(buf), &snapshotSchedules); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal mirror snapshot schedule list response") - } - - logger.Debugf("successfully listed snapshot schedules for pool %q", poolName) - return snapshotSchedules, nil -} - -// ListSnapshotSchedulesRecursively configures the snapshots schedule on a mirrored pool -func ListSnapshotSchedulesRecursively(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) ([]cephv1.SnapshotSchedulesSpec, error) { - // Build command - args := []string{"mirror", "snapshot", "schedule", "ls", "--pool", poolName, "--recursive"} - cmd := NewRBDCommand(context, clusterInfo, args) - cmd.JsonOutput = true - - // Run command - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve snapshot schedules recursively on pool %q. 
%s", poolName, string(buf)) - } - - // Unmarshal JSON into Go struct - var snapshotSchedulesRecursive []cephv1.SnapshotSchedulesSpec - if err := json.Unmarshal([]byte(buf), &snapshotSchedulesRecursive); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal mirror snapshot schedule list recursive response") - } - - logger.Debugf("successfully recursively listed snapshot schedules for pool %q", poolName) - return snapshotSchedulesRecursive, nil -} - -/* CreateRBDMirrorBootstrapPeerWithoutPool creates a bootstrap peer for the current cluster -It creates the cephx user for the remote cluster to use with all the necessary details -This function is handy on scenarios where no pools have been created yet but replication communication is required (connecting peers) -It essentially sits above CreateRBDMirrorBootstrapPeer() -and is a cluster-wide option in the scenario where all the pools will be mirrored to the same remote cluster - -So the scenario looks like: - - 1) Create the cephx ID on the source cluster - - 2) Enable a source pool for mirroring - at any time, we just don't know when - rbd --cluster site-a mirror pool enable image-pool image - - 3) Copy the key details over to the other cluster (non-ceph workflow) - - 4) Enable destination pool for mirroring - rbd --cluster site-b mirror pool enable image-pool image - - 5) Add the peer details to the destination pool - - 6) Repeat the steps flipping source and destination to enable - bi-directional mirroring -*/ -func CreateRBDMirrorBootstrapPeerWithoutPool(context *clusterd.Context, clusterInfo *ClusterInfo) ([]byte, error) { - fullClientName := getQualifiedUser(rbdMirrorPeerKeyringID) - logger.Infof("create rbd-mirror bootstrap peer token %q", fullClientName) - key, err := AuthGetOrCreateKey(context, clusterInfo, fullClientName, rbdMirrorPeerCaps) - if err != nil { - return nil, errors.Wrapf(err, "failed to create rbd-mirror peer key %q", fullClientName) - } - logger.Infof("successfully created rbd-mirror bootstrap peer token for cluster %q", clusterInfo.NamespacedName().Name) - - mons := sets.NewString() - for _, mon := range clusterInfo.Monitors { - mons.Insert(mon.Endpoint) - } - - peerToken := PeerToken{ - ClusterFSID: clusterInfo.FSID, - ClientID: rbdMirrorPeerKeyringID, - Key: key, - MonHost: strings.Join(mons.UnsortedList(), ","), - Namespace: clusterInfo.Namespace, - } - - // Marshal the Go type back to JSON - decodedTokenBackToJSON, err := json.Marshal(peerToken) - if err != nil { - return nil, errors.Wrap(err, "failed to encode peer token to json") - } - - // Return the base64 encoded token - return []byte(base64.StdEncoding.EncodeToString(decodedTokenBackToJSON)), nil -} diff --git a/pkg/daemon/ceph/client/mirror_test.go b/pkg/daemon/ceph/client/mirror_test.go deleted file mode 100644 index b4220e7d7..000000000 --- a/pkg/daemon/ceph/client/mirror_test.go +++ /dev/null @@ -1,342 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -var ( - bootstrapPeerToken = `eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==` //nolint:gosec // This is just a var name, not a real token - mirrorStatus = `{"summary":{"health":"WARNING","daemon_health":"OK","image_health":"WARNING","states":{"starting_replay":1,"replaying":1}}}` - mirrorInfo = `{"mode":"image","site_name":"39074576-5884-4ef3-8a4d-8a0c5ed33031","peers":[{"uuid":"4a6983c0-3c9d-40f5-b2a9-2334a4659827","direction":"rx-tx","site_name":"ocs","mirror_uuid":"","client_name":"client.rbd-mirror-peer"}]}` - snapshotScheduleStatus = `[{"schedule_time": "14:00:00-05:00", "image": "foo"}, {"schedule_time": "08:00:00-05:00", "image": "bar"}]` - snapshotScheduleList = `[{"interval":"3d","start_time":""},{"interval":"1d","start_time":"14:00:00-05:00"}]` - snapshotScheduleListRecursive = `[{"pool":"replicapool","namespace":"-","image":"-","items":[{"interval":"1d","start_time":"14:00:00-05:00"}]},{"pool":"replicapool","namespace":"","image":"snapeuh","items":[{"interval":"1d","start_time":"14:00:00-05:00"},{"interval":"4h","start_time":"14:00:00-05:00"},{"interval":"4h","start_time":"04:00:00-05:00"}]}]` -) - -func TestCreateRBDMirrorBootstrapPeer(t *testing.T) { - pool := "pool-test" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "mirror" { - assert.Equal(t, "pool", args[1]) - assert.Equal(t, "peer", args[2]) - assert.Equal(t, "bootstrap", args[3]) - assert.Equal(t, "create", args[4]) - assert.Equal(t, pool, args[5]) - return bootstrapPeerToken, nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - c := AdminClusterInfo("mycluster") - c.FSID = "4fe04ebb-ec0c-46c2-ac55-9eb52ebbfb82" - - token, err := CreateRBDMirrorBootstrapPeer(context, c, pool) - assert.NoError(t, err) - assert.Equal(t, bootstrapPeerToken, string(token)) -} -func TestEnablePoolMirroring(t *testing.T) { - pool := "pool-test" - poolSpec := cephv1.PoolSpec{Mirroring: cephv1.MirroringSpec{Mode: "image"}} - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "mirror" { - assert.Equal(t, "pool", args[1]) - assert.Equal(t, "enable", args[2]) - assert.Equal(t, pool, args[3]) - assert.Equal(t, poolSpec.Mirroring.Mode, args[4]) - return "", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - err := enablePoolMirroring(context, AdminClusterInfo("mycluster"), poolSpec, pool) - assert.NoError(t, err) -} - -func TestGetPoolMirroringStatus(t *testing.T) { - pool := "pool-test" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "mirror" { - assert.Equal(t, "pool", args[1]) - assert.Equal(t, "status", args[2]) - assert.Equal(t, pool, 
args[3]) - return mirrorStatus, nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - poolMirrorStatus, err := GetPoolMirroringStatus(context, AdminClusterInfo("mycluster"), pool) - assert.NoError(t, err) - assert.Equal(t, "WARNING", poolMirrorStatus.Summary.Health) - assert.Equal(t, "OK", poolMirrorStatus.Summary.DaemonHealth) -} - -func TestImportRBDMirrorBootstrapPeer(t *testing.T) { - pool := "pool-test" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "mirror" { - assert.Equal(t, "pool", args[1]) - assert.Equal(t, "peer", args[2]) - assert.Equal(t, "bootstrap", args[3]) - assert.Equal(t, "import", args[4]) - assert.Equal(t, pool, args[5]) - assert.Equal(t, 11, len(args)) - return mirrorStatus, nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - err := ImportRBDMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), pool, "", []byte(bootstrapPeerToken)) - assert.NoError(t, err) - - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "mirror" { - assert.Equal(t, "pool", args[1]) - assert.Equal(t, "peer", args[2]) - assert.Equal(t, "bootstrap", args[3]) - assert.Equal(t, "import", args[4]) - assert.Equal(t, pool, args[5]) - assert.Equal(t, "--direction", args[7]) - assert.Equal(t, "rx-tx", args[8]) - assert.Equal(t, 13, len(args)) - return mirrorStatus, nil - } - return "", errors.New("unknown command") - } - context = &clusterd.Context{Executor: executor} - err = ImportRBDMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), pool, "rx-tx", []byte(bootstrapPeerToken)) - assert.NoError(t, err) -} - -func TestGetPoolMirroringInfo(t *testing.T) { - pool := "pool-test" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "mirror" { - assert.Equal(t, "pool", args[1]) - assert.Equal(t, "info", args[2]) - assert.Equal(t, pool, args[3]) - return mirrorInfo, nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - poolMirrorInfo, err := GetPoolMirroringInfo(context, AdminClusterInfo("mycluster"), pool) - assert.NoError(t, err) - assert.Equal(t, "image", poolMirrorInfo.Mode) - assert.Equal(t, 1, len(poolMirrorInfo.Peers)) -} - -func TestEnableSnapshotSchedule(t *testing.T) { - pool := "pool-test" - interval := "24h" - - // Schedule with Interval - { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %v %v", command, args) - if args[0] == "mirror" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "schedule", args[2]) - assert.Equal(t, "add", args[3]) - assert.Equal(t, "--pool", args[4]) - assert.Equal(t, pool, args[5]) - assert.Equal(t, interval, args[6]) - assert.Equal(t, len(args), 11) - return "success", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - poolSpec := &cephv1.PoolSpec{Mirroring: cephv1.MirroringSpec{SnapshotSchedules: []cephv1.SnapshotScheduleSpec{{Interval: interval}}}} - - err := enableSnapshotSchedule(context, AdminClusterInfo("mycluster"), poolSpec.Mirroring.SnapshotSchedules[0], pool) - assert.NoError(t, err) - } - - // Schedule with Interval and start time - { - startTime 
:= "14:00:00-05:00" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %v %v", command, args) - if args[0] == "mirror" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "schedule", args[2]) - assert.Equal(t, "add", args[3]) - assert.Equal(t, "--pool", args[4]) - assert.Equal(t, pool, args[5]) - assert.Equal(t, interval, args[6]) - assert.Equal(t, startTime, args[7]) - assert.Equal(t, len(args), 12) - return "success", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - poolSpec := &cephv1.PoolSpec{Mirroring: cephv1.MirroringSpec{SnapshotSchedules: []cephv1.SnapshotScheduleSpec{{Interval: interval, StartTime: startTime}}}} - - err := enableSnapshotSchedule(context, AdminClusterInfo("mycluster"), poolSpec.Mirroring.SnapshotSchedules[0], pool) - assert.NoError(t, err) - } -} - -func TestListSnapshotSchedules(t *testing.T) { - pool := "pool-test" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %v %v", command, args) - if args[0] == "mirror" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "schedule", args[2]) - assert.Equal(t, "ls", args[3]) - assert.Equal(t, "--pool", args[4]) - assert.Equal(t, pool, args[5]) - return snapshotScheduleStatus, nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - snapshotScheduleStatus, err := listSnapshotSchedules(context, AdminClusterInfo("mycluster"), pool) - assert.NoError(t, err) - assert.Equal(t, 2, len(snapshotScheduleStatus)) -} - -func TestListSnapshotSchedulesRecursively(t *testing.T) { - pool := "pool-test" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %v %v", command, args) - if args[0] == "mirror" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "schedule", args[2]) - assert.Equal(t, "ls", args[3]) - assert.Equal(t, "--pool", args[4]) - assert.Equal(t, pool, args[5]) - assert.Equal(t, "--recursive", args[6]) - return snapshotScheduleListRecursive, nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - snapshotScheduleStatus, err := ListSnapshotSchedulesRecursively(context, AdminClusterInfo("mycluster"), pool) - assert.NoError(t, err) - assert.Equal(t, 2, len(snapshotScheduleStatus)) -} - -func TestRemoveSnapshotSchedule(t *testing.T) { - pool := "pool-test" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %v %v", command, args) - if args[0] == "mirror" { - assert.Equal(t, "snapshot", args[1]) - assert.Equal(t, "schedule", args[2]) - assert.Equal(t, "remove", args[3]) - assert.Equal(t, "--pool", args[4]) - assert.Equal(t, pool, args[5]) - return "", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - snapScheduleResponse := cephv1.SnapshotSchedule{StartTime: "14:00:00-05:00", Interval: "1d"} - err := removeSnapshotSchedule(context, AdminClusterInfo("mycluster"), snapScheduleResponse, pool) - assert.NoError(t, err) -} - -func TestRemoveSnapshotSchedules(t *testing.T) { - pool := "pool-test" - interval := "24h" - startTime := "14:00:00-05:00" - executor := 
&exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %v %v", command, args) - if args[0] == "mirror" { - switch args[3] { - case "ls": - return snapshotScheduleList, nil - case "remove": - return "success", nil - } - } - return "", errors.New("unknown command") - } - - context := &clusterd.Context{Executor: executor} - poolSpec := &cephv1.PoolSpec{Mirroring: cephv1.MirroringSpec{SnapshotSchedules: []cephv1.SnapshotScheduleSpec{{Interval: interval, StartTime: startTime}}}} - err := removeSnapshotSchedules(context, AdminClusterInfo("mycluster"), *poolSpec, pool) - assert.NoError(t, err) -} - -func TestDisableMirroring(t *testing.T) { - pool := "pool-test" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "mirror" { - assert.Equal(t, "pool", args[1]) - assert.Equal(t, "disable", args[2]) - assert.Equal(t, pool, args[3]) - return "", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - err := disablePoolMirroring(context, AdminClusterInfo("mycluster"), pool) - assert.NoError(t, err) -} - -func TestRemoveClusterPeer(t *testing.T) { - pool := "pool-test" - peerUUID := "39ae33fb-1dd6-4f9b-8ed7-0e4517068900" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if args[0] == "mirror" { - assert.Equal(t, "pool", args[1]) - assert.Equal(t, "peer", args[2]) - assert.Equal(t, "remove", args[3]) - assert.Equal(t, pool, args[4]) - assert.Equal(t, peerUUID, args[5]) - return "", nil - } - return "", errors.New("unknown command") - } - context := &clusterd.Context{Executor: executor} - - err := removeClusterPeer(context, AdminClusterInfo("mycluster"), pool, peerUUID) - assert.NoError(t, err) -} diff --git a/pkg/daemon/ceph/client/mon.go b/pkg/daemon/ceph/client/mon.go deleted file mode 100644 index dec00f07b..000000000 --- a/pkg/daemon/ceph/client/mon.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package client - -import ( - "encoding/json" - "strings" - "syscall" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/util/exec" -) - -const ( - defaultStretchCrushRuleName = "default_stretch_cluster_rule" -) - -// MonStatusResponse represents the response from a quorum_status mon_command (subset of all available fields, only -// marshal ones we care about) -type MonStatusResponse struct { - Quorum []int `json:"quorum"` - MonMap struct { - Mons []MonMapEntry `json:"mons"` - } `json:"monmap"` -} - -// MonMapEntry represents an entry in the monitor map -type MonMapEntry struct { - Name string `json:"name"` - Rank int `json:"rank"` - Address string `json:"addr"` - PublicAddr string `json:"public_addr"` - PublicAddrs struct { - Addrvec []AddrvecEntry `json:"addrvec"` - } `json:"public_addrs"` -} - -// AddrvecEntry represents an entry type for a given messenger version -type AddrvecEntry struct { - Type string `json:"type"` - Addr string `json:"addr"` - Nonce int `json:"nonce"` -} - -// MonDump represents the response from a mon dump -type MonDump struct { - StretchMode bool `json:"stretch_mode"` - ElectionStrategy int `json:"election_strategy"` - FSID string `json:"fsid"` - Mons []MonDumpEntry `json:"mons"` - Quorum []int `json:"quorum"` -} - -type MonDumpEntry struct { - Name string `json:"name"` - Rank int `json:"rank"` - CrushLocation string `json:"crush_location"` -} - -// GetMonQuorumStatus calls quorum_status mon_command -func GetMonQuorumStatus(context *clusterd.Context, clusterInfo *ClusterInfo) (MonStatusResponse, error) { - args := []string{"quorum_status"} - cmd := NewCephCommand(context, clusterInfo, args) - buf, err := cmd.Run() - if err != nil { - return MonStatusResponse{}, errors.Wrap(err, "mon quorum status failed") - } - - var resp MonStatusResponse - err = json.Unmarshal(buf, &resp) - if err != nil { - return MonStatusResponse{}, errors.Wrapf(err, "unmarshal failed. raw buffer response: %s", buf) - } - - return resp, nil -} - -// GetMonDump calls mon dump command -func GetMonDump(context *clusterd.Context, clusterInfo *ClusterInfo) (MonDump, error) { - args := []string{"mon", "dump"} - cmd := NewCephCommand(context, clusterInfo, args) - buf, err := cmd.Run() - if err != nil { - return MonDump{}, errors.Wrap(err, "mon dump failed") - } - - var response MonDump - err = json.Unmarshal(buf, &response) - if err != nil { - return MonDump{}, errors.Wrapf(err, "unmarshal failed. raw buffer response: %s", buf) - } - - return response, nil -} - -// EnableStretchElectionStrategy enables the mon connectivity algorithm for stretch clusters -func EnableStretchElectionStrategy(context *clusterd.Context, clusterInfo *ClusterInfo) error { - args := []string{"mon", "set", "election_strategy", "connectivity"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrap(err, "failed to enable stretch cluster election strategy") - } - logger.Infof("successfully enabled stretch cluster election strategy. 
%s", string(buf)) - return nil -} - -// CreateDefaultStretchCrushRule creates the default CRUSH rule for the stretch cluster -func CreateDefaultStretchCrushRule(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, failureDomain string) error { - pool := cephv1.PoolSpec{ - FailureDomain: failureDomain, - Replicated: cephv1.ReplicatedSpec{SubFailureDomain: clusterSpec.Mon.StretchCluster.SubFailureDomain}, - } - if err := createStretchCrushRule(context, clusterInfo, clusterSpec, defaultStretchCrushRuleName, pool); err != nil { - return errors.Wrap(err, "failed to create default stretch crush rule") - } - logger.Info("successfully created the default stretch crush rule") - return nil -} - -// SetMonStretchTiebreaker sets the tiebreaker mon in the stretch cluster -func SetMonStretchTiebreaker(context *clusterd.Context, clusterInfo *ClusterInfo, monName, bucketType string) error { - logger.Infof("enabling stretch mode with mon arbiter %q with crush rule %q in failure domain %q", monName, defaultStretchCrushRuleName, bucketType) - args := []string{"mon", "enable_stretch_mode", monName, defaultStretchCrushRuleName, bucketType} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.EINVAL) { - // TODO: Get a more distinctive error from ceph so we don't have to compare the error message - if strings.Contains(string(buf), "stretch mode is already engaged") { - logger.Infof("stretch mode is already enabled") - return nil - } - return errors.Wrapf(err, "stretch mode failed to be enabled. %s", string(buf)) - } - return errors.Wrap(err, "failed to set mon stretch zone") - } - logger.Debug(string(buf)) - logger.Infof("successfully set mon tiebreaker %q in failure domain %q", monName, bucketType) - return nil -} diff --git a/pkg/daemon/ceph/client/mon_test.go b/pkg/daemon/ceph/client/mon_test.go deleted file mode 100644 index 83ef7fe54..000000000 --- a/pkg/daemon/ceph/client/mon_test.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package client - -import ( - "fmt" - "testing" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/util/exec" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestCephArgs(t *testing.T) { - // cluster a under /etc - args := []string{} - clusterInfo := AdminClusterInfo("a") - exec.CephCommandsTimeout = 15 * time.Second - command, args := FinalizeCephCommandArgs(CephTool, clusterInfo, args, "/etc") - assert.Equal(t, CephTool, command) - assert.Equal(t, 5, len(args)) - assert.Equal(t, "--connect-timeout=15", args[0]) - assert.Equal(t, "--cluster=a", args[1]) - assert.Equal(t, "--conf=/etc/a/a.config", args[2]) - assert.Equal(t, "--name=client.admin", args[3]) - assert.Equal(t, "--keyring=/etc/a/client.admin.keyring", args[4]) - - RunAllCephCommandsInToolboxPod = "rook-ceph-tools" - args = []string{} - command, args = FinalizeCephCommandArgs(CephTool, clusterInfo, args, "/etc") - assert.Equal(t, Kubectl, command) - assert.Equal(t, 10, len(args), fmt.Sprintf("%+v", args)) - assert.Equal(t, "exec", args[0]) - assert.Equal(t, "-i", args[1]) - assert.Equal(t, "rook-ceph-tools", args[2]) - assert.Equal(t, "-n", args[3]) - assert.Equal(t, clusterInfo.Namespace, args[4]) - assert.Equal(t, "--", args[5]) - assert.Equal(t, CephTool, args[8]) - assert.Equal(t, "--connect-timeout=15", args[9]) - RunAllCephCommandsInToolboxPod = "" - - // cluster under /var/lib/rook - args = []string{"myarg"} - command, args = FinalizeCephCommandArgs(RBDTool, clusterInfo, args, "/var/lib/rook") - assert.Equal(t, RBDTool, command) - assert.Equal(t, 5, len(args)) - assert.Equal(t, "myarg", args[0]) - assert.Equal(t, "--cluster="+clusterInfo.Namespace, args[1]) - assert.Equal(t, "--conf=/var/lib/rook/a/a.config", args[2]) - assert.Equal(t, "--name=client.admin", args[3]) - assert.Equal(t, "--keyring=/var/lib/rook/a/client.admin.keyring", args[4]) -} - -func TestStretchElectionStrategy(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "mon" && args[1] == "set" && args[2] == "election_strategy" { - assert.Equal(t, "connectivity", args[3]) - return "", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") - - err := EnableStretchElectionStrategy(context, clusterInfo) - assert.NoError(t, err) -} - -func TestStretchClusterMonTiebreaker(t *testing.T) { - monName := "a" - failureDomain := "rack" - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "mon" && args[1] == "enable_stretch_mode": - assert.Equal(t, monName, args[2]) - assert.Equal(t, defaultStretchCrushRuleName, args[3]) - assert.Equal(t, failureDomain, args[4]) - return "", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") - - err := SetMonStretchTiebreaker(context, clusterInfo, monName, failureDomain) - assert.NoError(t, err) -} - -func TestMonDump(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - 
logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "mon" && args[1] == "dump": - return `{"epoch":3,"fsid":"6a31a264-9090-4048-8d95-4b8c3cde909d","modified":"2020-12-09T18:13:36.346150Z","created":"2020-12-09T18:13:13.014270Z","min_mon_release":15,"min_mon_release_name":"octopus", - "features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]}, - "election_strategy":1,"mons":[ - {"rank":0,"name":"a","crush_location":"{zone=a}","public_addrs":{"addrvec":[{"type":"v2","addr":"10.109.80.104:3300","nonce":0},{"type":"v1","addr":"10.109.80.104:6789","nonce":0}]},"addr":"10.109.80.104:6789/0","public_addr":"10.109.80.104:6789/0","priority":0,"weight":0}, - {"rank":1,"name":"b","crush_location":"{zone=b}","public_addrs":{"addrvec":[{"type":"v2","addr":"10.107.12.199:3300","nonce":0},{"type":"v1","addr":"10.107.12.199:6789","nonce":0}]},"addr":"10.107.12.199:6789/0","public_addr":"10.107.12.199:6789/0","priority":0,"weight":0}, - {"rank":2,"name":"c","crush_location":"{zone=c}","public_addrs":{"addrvec":[{"type":"v2","addr":"10.107.5.207:3300","nonce":0},{"type":"v1","addr":"10.107.5.207:6789","nonce":0}]},"addr":"10.107.5.207:6789/0","public_addr":"10.107.5.207:6789/0","priority":0,"weight":0}], - "quorum":[0,1,2]}`, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") - - dump, err := GetMonDump(context, clusterInfo) - assert.NoError(t, err) - assert.Equal(t, 1, dump.ElectionStrategy) - assert.Equal(t, "{zone=a}", dump.Mons[0].CrushLocation) - assert.Equal(t, "a", dump.Mons[0].Name) - assert.Equal(t, 0, dump.Mons[0].Rank) - assert.Equal(t, "b", dump.Mons[1].Name) - assert.Equal(t, 1, dump.Mons[1].Rank) - assert.Equal(t, 3, len(dump.Mons)) - assert.Equal(t, 3, len(dump.Quorum)) -} diff --git a/pkg/daemon/ceph/client/osd.go b/pkg/daemon/ceph/client/osd.go deleted file mode 100644 index c571ec800..000000000 --- a/pkg/daemon/ceph/client/osd.go +++ /dev/null @@ -1,394 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package client - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" -) - -type OSDUsage struct { - OSDNodes []OSDNodeUsage `json:"nodes"` - Summary struct { - TotalKB json.Number `json:"total_kb"` - TotalUsedKB json.Number `json:"total_kb_used"` - TotalAvailKB json.Number `json:"total_kb_avail"` - AverageUtil json.Number `json:"average_utilization"` - } `json:"summary"` -} - -type OSDNodeUsage struct { - ID int `json:"id"` - Name string `json:"name"` - CrushWeight json.Number `json:"crush_weight"` - Depth json.Number `json:"depth"` - Reweight json.Number `json:"reweight"` - KB json.Number `json:"kb"` - UsedKB json.Number `json:"kb_used"` - AvailKB json.Number `json:"kb_avail"` - Utilization json.Number `json:"utilization"` - Variance json.Number `json:"var"` - Pgs json.Number `json:"pgs"` -} - -type OSDPerfStats struct { - PerfInfo []struct { - ID json.Number `json:"id"` - Stats struct { - CommitLatency json.Number `json:"commit_latency_ms"` - ApplyLatency json.Number `json:"apply_latency_ms"` - } `json:"perf_stats"` - } `json:"osd_perf_infos"` -} - -type OSDDump struct { - OSDs []struct { - OSD json.Number `json:"osd"` - Up json.Number `json:"up"` - In json.Number `json:"in"` - } `json:"osds"` - Flags string `json:"flags"` - CrushNodeFlags map[string][]string `json:"crush_node_flags"` -} - -// IsFlagSet checks if an OSD flag is set -func (dump *OSDDump) IsFlagSet(checkFlag string) bool { - flags := strings.Split(dump.Flags, ",") - for _, flag := range flags { - if flag == checkFlag { - return true - } - } - return false -} - -// IsFlagSetOnCrushUnit checks if an OSD flag is set on specified Crush unit -func (dump *OSDDump) IsFlagSetOnCrushUnit(checkFlag, crushUnit string) bool { - for unit, list := range dump.CrushNodeFlags { - if crushUnit == unit { - for _, flag := range list { - if flag == checkFlag { - return true - } - } - } - } - return false -} - -// UpdateFlagOnCrushUnit checks if the flag is in the desired state and sets/unsets if it isn't. 
Mitigates redundant calls -// it returns true if the value was changed -func (dump *OSDDump) UpdateFlagOnCrushUnit(context *clusterd.Context, clusterInfo *ClusterInfo, set bool, crushUnit, flag string) (bool, error) { - flagSet := dump.IsFlagSetOnCrushUnit(flag, crushUnit) - if flagSet && !set { - err := UnsetFlagOnCrushUnit(context, clusterInfo, crushUnit, flag) - if err != nil { - return true, err - } - return true, nil - } - if !flagSet && set { - err := SetFlagOnCrushUnit(context, clusterInfo, crushUnit, flag) - if err != nil { - return true, err - } - return true, nil - } - return false, nil -} - -// SetFlagOnCrushUnit sets the specified flag on the crush unit -func SetFlagOnCrushUnit(context *clusterd.Context, clusterInfo *ClusterInfo, crushUnit, flag string) error { - args := []string{"osd", "set-group", flag, crushUnit} - cmd := NewCephCommand(context, clusterInfo, args) - _, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to set flag %s on %s", crushUnit, flag) - } - return nil -} - -// UnsetFlagOnCrushUnit unsets the specified flag on the crush unit -func UnsetFlagOnCrushUnit(context *clusterd.Context, clusterInfo *ClusterInfo, crushUnit, flag string) error { - args := []string{"osd", "unset-group", flag, crushUnit} - cmd := NewCephCommand(context, clusterInfo, args) - _, err := cmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to unset flag %s on %s", crushUnit, flag) - } - return nil -} - -type SafeToDestroyStatus struct { - SafeToDestroy []int `json:"safe_to_destroy"` -} - -// OsdTree represents the CRUSH hierarchy -type OsdTree struct { - Nodes []struct { - ID int `json:"id"` - Name string `json:"name"` - Type string `json:"type"` - TypeID int `json:"type_id"` - Children []int `json:"children,omitempty"` - PoolWeights struct { - } `json:"pool_weights,omitempty"` - CrushWeight float64 `json:"crush_weight,omitempty"` - Depth int `json:"depth,omitempty"` - Exists int `json:"exists,omitempty"` - Status string `json:"status,omitempty"` - Reweight float64 `json:"reweight,omitempty"` - PrimaryAffinity float64 `json:"primary_affinity,omitempty"` - } `json:"nodes"` - Stray []struct { - ID int `json:"id"` - Name string `json:"name"` - Type string `json:"type"` - TypeID int `json:"type_id"` - CrushWeight float64 `json:"crush_weight"` - Depth int `json:"depth"` - Exists int `json:"exists"` - Status string `json:"status"` - Reweight float64 `json:"reweight"` - PrimaryAffinity float64 `json:"primary_affinity"` - } `json:"stray"` -} - -// OsdList returns the list of OSD by their IDs -type OsdList []int - -// StatusByID returns status and inCluster states for given OSD id -func (dump *OSDDump) StatusByID(id int64) (int64, int64, error) { - for _, d := range dump.OSDs { - i, err := d.OSD.Int64() - if err != nil { - return 0, 0, err - } - - if id == i { - in, err := d.In.Int64() - if err != nil { - return 0, 0, err - } - - up, err := d.Up.Int64() - if err != nil { - return 0, 0, err - } - - return up, in, nil - } - } - - return 0, 0, errors.Errorf("not found osd.%d in OSDDump", id) -} - -func GetOSDUsage(context *clusterd.Context, clusterInfo *ClusterInfo) (*OSDUsage, error) { - args := []string{"osd", "df"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, errors.Wrap(err, "failed to get osd df") - } - - var osdUsage OSDUsage - if err := json.Unmarshal(buf, &osdUsage); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal osd df response") - } - - return &osdUsage, nil -} - -func GetOSDPerfStats(context 
*clusterd.Context, clusterInfo *ClusterInfo) (*OSDPerfStats, error) { - args := []string{"osd", "perf"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, errors.Wrap(err, "failed to get osd perf") - } - - var osdPerfStats OSDPerfStats - if err := json.Unmarshal(buf, &osdPerfStats); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal osd perf response") - } - - return &osdPerfStats, nil -} - -func GetOSDDump(context *clusterd.Context, clusterInfo *ClusterInfo) (*OSDDump, error) { - args := []string{"osd", "dump"} - cmd := NewCephCommand(context, clusterInfo, args) - buf, err := cmd.Run() - if err != nil { - return nil, errors.Wrap(err, "failed to get osd dump") - } - - var osdDump OSDDump - if err := json.Unmarshal(buf, &osdDump); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal osd dump response") - } - - return &osdDump, nil -} - -func OSDOut(context *clusterd.Context, clusterInfo *ClusterInfo, osdID int) (string, error) { - args := []string{"osd", "out", strconv.Itoa(osdID)} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - return string(buf), err -} - -func OsdSafeToDestroy(context *clusterd.Context, clusterInfo *ClusterInfo, osdID int) (bool, error) { - args := []string{"osd", "safe-to-destroy", strconv.Itoa(osdID)} - cmd := NewCephCommand(context, clusterInfo, args) - buf, err := cmd.Run() - if err != nil { - return false, errors.Wrap(err, "failed to get safe-to-destroy status") - } - - var output SafeToDestroyStatus - if err := json.Unmarshal(buf, &output); err != nil { - return false, errors.Wrap(err, "failed to unmarshal safe-to-destroy response") - } - if len(output.SafeToDestroy) != 0 && output.SafeToDestroy[0] == osdID { - return true, nil - } - return false, nil -} - -// HostTree returns the osd tree -func HostTree(context *clusterd.Context, clusterInfo *ClusterInfo) (OsdTree, error) { - var output OsdTree - - args := []string{"osd", "tree"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return output, errors.Wrap(err, "failed to get osd tree") - } - - err = json.Unmarshal(buf, &output) - if err != nil { - return output, errors.Wrap(err, "failed to unmarshal 'osd tree' response") - } - - return output, nil -} - -// OsdListNum returns the list of OSDs -func OsdListNum(context *clusterd.Context, clusterInfo *ClusterInfo) (OsdList, error) { - var output OsdList - - args := []string{"osd", "ls"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return output, errors.Wrap(err, "failed to get osd list") - } - - err = json.Unmarshal(buf, &output) - if err != nil { - return output, errors.Wrap(err, "failed to unmarshal 'osd ls' response") - } - - return output, nil -} - -// OSDDeviceClass report device class for osd -type OSDDeviceClass struct { - ID int `json:"osd"` - DeviceClass string `json:"device_class"` -} - -// OSDDeviceClasses returns the device classes for particular OsdIDs -func OSDDeviceClasses(context *clusterd.Context, clusterInfo *ClusterInfo, osdIds []string) ([]OSDDeviceClass, error) { - var deviceClasses []OSDDeviceClass - - args := []string{"osd", "crush", "get-device-class"} - args = append(args, osdIds...) 
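// A standalone sketch of how the OSDDump flag helpers above (IsFlagSet and
// IsFlagSetOnCrushUnit) read `ceph osd dump` output: the global flags field
// is a comma-separated string, and crush_node_flags maps a crush unit name
// to a list of flags. The osdDump struct here is a trimmed, illustrative
// copy, and the sample JSON with the noout flag is made up.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
)

type osdDump struct {
	Flags          string              `json:"flags"`
	CrushNodeFlags map[string][]string `json:"crush_node_flags"`
}

// isFlagSet mirrors OSDDump.IsFlagSet: split the global flags string on commas.
func (d *osdDump) isFlagSet(flag string) bool {
	for _, f := range strings.Split(d.Flags, ",") {
		if f == flag {
			return true
		}
	}
	return false
}

// isFlagSetOnCrushUnit mirrors OSDDump.IsFlagSetOnCrushUnit: look the unit up
// in the per-unit flag map and scan its flag list.
func (d *osdDump) isFlagSetOnCrushUnit(flag, unit string) bool {
	for _, f := range d.CrushNodeFlags[unit] {
		if f == flag {
			return true
		}
	}
	return false
}

func main() {
	raw := `{"flags":"sortbitwise,recovery_deletes,purged_snapdirs,noout",
	         "crush_node_flags":{"host-a":["noout"],"host-b":[]}}`
	var dump osdDump
	if err := json.Unmarshal([]byte(raw), &dump); err != nil {
		log.Fatalf("failed to unmarshal osd dump: %v", err)
	}
	fmt.Println("noout set globally:", dump.isFlagSet("noout"))
	fmt.Println("noout set on host-a:", dump.isFlagSetOnCrushUnit("noout", "host-a"))
	fmt.Println("noout set on host-b:", dump.isFlagSetOnCrushUnit("noout", "host-b"))
}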
- buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return deviceClasses, errors.Wrap(err, "failed to get device-class info") - } - - err = json.Unmarshal(buf, &deviceClasses) - if err != nil { - return deviceClasses, errors.Wrap(err, "failed to unmarshal 'osd crush get-device-class' response") - } - - return deviceClasses, nil -} - -// OSDOkToStopStats report detailed information about which OSDs are okay to stop -type OSDOkToStopStats struct { - OkToStop bool `json:"ok_to_stop"` - OSDs []int `json:"osds"` - NumOkPGs int `json:"num_ok_pgs"` - NumNotOkPGs int `json:"num_not_ok_pgs"` - BadBecomeInactive []string `json:"bad_become_inactive"` - OkBecomeDegraded []string `json:"ok_become_degraded"` -} - -// OSDOkToStop returns a list of OSDs that can be stopped that includes the OSD ID given. -// This is relevant, for example, when checking which OSDs can be updated. -// The number of OSDs returned is limited by the value set in maxReturned. -// maxReturned=0 is the same as maxReturned=1. -func OSDOkToStop(context *clusterd.Context, clusterInfo *ClusterInfo, osdID, maxReturned int) ([]int, error) { - args := []string{"osd", "ok-to-stop", strconv.Itoa(osdID)} - returnsList := false // does the ceph call return a list of OSD IDs? - if clusterInfo.CephVersion.IsAtLeastPacific() { - returnsList = true - // NOTE: if the number of OSD IDs given in the CLI arg query is Q and --max=N is given, if - // N < Q, Ceph treats the query as though max=Q instead, always returning at least Q OSDs. - args = append(args, fmt.Sprintf("--max=%d", maxReturned)) - } - - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - // is not ok to stop (or command error) - return []int{}, errors.Wrapf(err, "OSD %d is not ok to stop", osdID) - } - - if !returnsList { - // If does not return list, just return a slice including only the OSD ID queried - return []int{osdID}, nil - } - - var stats OSDOkToStopStats - err = json.Unmarshal(buf, &stats) - if err != nil { - // Since the command succeeded we still know that at least the given OSD ID is ok to - // stop, so we do not *have* to return an error. However, it is good to do it anyway so - // that we can catch breaking changes to JSON output in CI testing. As a middle ground - // here, return error but also return the given OSD ID in the output in case the calling - // function wants to recover from this case. - return []int{osdID}, errors.Wrapf(err, "failed to unmarshal 'osd ok-to-stop %d' response", osdID) - } - - return stats.OSDs, nil -} - -// SetPrimaryAffinity assigns primary-affinity (within range [0.0, 1.0]) to a specific OSD. -func SetPrimaryAffinity(context *clusterd.Context, clusterInfo *ClusterInfo, osdID int, affinity string) error { - logger.Infof("setting osd.%d with primary-affinity %q", osdID, affinity) - args := []string{"osd", "primary-affinity", fmt.Sprintf("osd.%d", osdID), affinity} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to set osd.%d with primary-affinity %q", osdID, affinity) - } - logger.Infof("successfully applied osd.%d primary-affinity %q", osdID, affinity) - return nil -} diff --git a/pkg/daemon/ceph/client/osd_test.go b/pkg/daemon/ceph/client/osd_test.go deleted file mode 100644 index 2e9a8ff4f..000000000 --- a/pkg/daemon/ceph/client/osd_test.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "fmt" - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client/fake" - "github.com/rook/rook/pkg/operator/ceph/version" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -var ( - fakeOsdTree = `{ - "nodes": [ - { - "id": -3, - "name": "minikube", - "type": "host", - "type_id": 1, - "pool_weights": {}, - "children": [ - 2, - 1, - 0 - ] - }, - { - "id": -2, - "name": "minikube-2", - "type": "host", - "type_id": 1, - "pool_weights": {}, - "children": [ - 3, - 4, - 5 - ] - } - ] - }` - - fakeOSdList = `[0,1,2]` -) - -func TestHostTree(t *testing.T) { - executor := &exectest.MockExecutor{} - emptyTreeResult := false - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "osd" && args[1] == "tree": - if emptyTreeResult { - return `not a json`, nil - } - return fakeOsdTree, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - tree, err := HostTree(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) - assert.NoError(t, err) - assert.Equal(t, 2, len(tree.Nodes)) - assert.Equal(t, "minikube", tree.Nodes[0].Name) - assert.Equal(t, 3, len(tree.Nodes[0].Children)) - - emptyTreeResult = true - tree, err = HostTree(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) - assert.Error(t, err) - assert.Equal(t, 0, len(tree.Nodes)) - -} - -func TestOsdListNum(t *testing.T) { - executor := &exectest.MockExecutor{} - emptyOsdListNumResult := false - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "osd" && args[1] == "ls": - if emptyOsdListNumResult { - return `not a json`, nil - } - return fakeOSdList, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - list, err := OsdListNum(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) - assert.NoError(t, err) - assert.Equal(t, 3, len(list)) - - emptyOsdListNumResult = true - list, err = OsdListNum(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) - assert.Error(t, err) - assert.Equal(t, 0, len(list)) -} - -func TestOSDDeviceClasses(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "osd" && args[1] == "crush" && args[2] == "get-device-class" && len(args) > 3: - return fake.OSDDeviceClassOutput(args[3]), nil - default: - return fake.OSDDeviceClassOutput(""), nil - } - } - - context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") - - t.Run("device classes returned", func(t *testing.T) { - deviceClasses, err := 
OSDDeviceClasses(context, clusterInfo, []string{"0"}) - assert.NoError(t, err) - assert.Equal(t, deviceClasses[0].DeviceClass, "hdd") - }) - - t.Run("error happened when no id provided", func(t *testing.T) { - _, err := OSDDeviceClasses(context, clusterInfo, []string{}) - assert.Error(t, err) - }) -} - -func TestOSDOkToStop(t *testing.T) { - returnString := "" - returnOkResult := true - seenArgs := []string{} - - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "osd" && args[1] == "ok-to-stop": - seenArgs = args - if returnOkResult { - return returnString, nil - } - return returnString, errors.Errorf("Error EBUSY: unsafe to stop osd(s) at this time (50 PGs are or would become offline)") - } - panic(fmt.Sprintf("unexpected ceph command %q", args)) - } - - context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") - - doSetup := func() { - seenArgs = []string{} - } - - t.Run("pacific output ok to stop", func(t *testing.T) { - doSetup() - clusterInfo.CephVersion = version.Pacific - returnString = fake.OsdOkToStopOutput(1, []int{1, 2}, true) - returnOkResult = true - osds, err := OSDOkToStop(context, clusterInfo, 1, 2) - assert.NoError(t, err) - assert.ElementsMatch(t, osds, []int{1, 2}) - assert.Equal(t, "1", seenArgs[2]) - assert.Equal(t, "--max=2", seenArgs[3]) - }) - - t.Run("pacific output not ok to stop", func(t *testing.T) { - doSetup() - clusterInfo.CephVersion = version.Pacific - returnString = fake.OsdOkToStopOutput(3, []int{}, true) - returnOkResult = false - _, err := OSDOkToStop(context, clusterInfo, 3, 5) - assert.Error(t, err) - assert.Equal(t, "3", seenArgs[2]) - assert.Equal(t, "--max=5", seenArgs[3]) - }) - - t.Run("pacific handles maxReturned=0", func(t *testing.T) { - doSetup() - clusterInfo.CephVersion = version.Pacific - returnString = fake.OsdOkToStopOutput(4, []int{4, 8}, true) - returnOkResult = true - osds, err := OSDOkToStop(context, clusterInfo, 4, 0) - assert.NoError(t, err) - assert.ElementsMatch(t, osds, []int{4, 8}) - assert.Equal(t, "4", seenArgs[2]) - // should just pass through as --max=0; don't do any special processing - assert.Equal(t, "--max=0", seenArgs[3]) - }) - - t.Run("octopus output not ok to stop", func(t *testing.T) { - doSetup() - clusterInfo.CephVersion = version.Octopus - returnString = fake.OsdOkToStopOutput(3, []int{}, false) - returnOkResult = false - _, err := OSDOkToStop(context, clusterInfo, 3, 5) - assert.Error(t, err) - assert.Equal(t, "3", seenArgs[2]) - assert.NotContains(t, seenArgs[3], "--max") // do not issue the "--max" flag below pacific - }) - - t.Run("octopus output ok to stop", func(t *testing.T) { - doSetup() - clusterInfo.CephVersion = version.Octopus - returnString = fake.OsdOkToStopOutput(50, []int{50}, false) - returnOkResult = true - osds, err := OSDOkToStop(context, clusterInfo, 50, 2) - assert.NoError(t, err) - assert.ElementsMatch(t, osds, []int{50}) - assert.Equal(t, "50", seenArgs[2]) - assert.NotContains(t, seenArgs[3], "--max") // do not issue the "--max" flag below pacific - }) -} diff --git a/pkg/daemon/ceph/client/pool.go b/pkg/daemon/ceph/client/pool.go deleted file mode 100644 index e1500b0d0..000000000 --- a/pkg/daemon/ceph/client/pool.go +++ /dev/null @@ -1,661 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "k8s.io/apimachinery/pkg/api/resource" -) - -const ( - confirmFlag = "--yes-i-really-mean-it" - reallyConfirmFlag = "--yes-i-really-really-mean-it" - targetSizeRatioProperty = "target_size_ratio" - compressionModeProperty = "compression_mode" - PgAutoscaleModeProperty = "pg_autoscale_mode" - PgAutoscaleModeOn = "on" -) - -type CephStoragePoolSummary struct { - Name string `json:"poolname"` - Number int `json:"poolnum"` -} - -type CephStoragePoolDetails struct { - Name string `json:"pool"` - Number int `json:"pool_id"` - Size uint `json:"size"` - ErasureCodeProfile string `json:"erasure_code_profile"` - FailureDomain string `json:"failureDomain"` - CrushRoot string `json:"crushRoot"` - DeviceClass string `json:"deviceClass"` - CompressionMode string `json:"compression_mode"` - TargetSizeRatio float64 `json:"target_size_ratio,omitempty"` - RequireSafeReplicaSize bool `json:"requireSafeReplicaSize,omitempty"` -} - -type CephStoragePoolStats struct { - Pools []struct { - Name string `json:"name"` - ID int `json:"id"` - Stats struct { - BytesUsed float64 `json:"bytes_used"` - RawBytesUsed float64 `json:"raw_bytes_used"` - MaxAvail float64 `json:"max_avail"` - Objects float64 `json:"objects"` - DirtyObjects float64 `json:"dirty"` - ReadIO float64 `json:"rd"` - ReadBytes float64 `json:"rd_bytes"` - WriteIO float64 `json:"wr"` - WriteBytes float64 `json:"wr_bytes"` - } `json:"stats"` - } `json:"pools"` -} - -type PoolStatistics struct { - Images struct { - Count int `json:"count"` - ProvisionedBytes int `json:"provisioned_bytes"` - SnapCount int `json:"snap_count"` - } `json:"images"` - Trash struct { - Count int `json:"count"` - ProvisionedBytes int `json:"provisioned_bytes"` - SnapCount int `json:"snap_count"` - } `json:"trash"` -} - -func ListPoolSummaries(context *clusterd.Context, clusterInfo *ClusterInfo) ([]CephStoragePoolSummary, error) { - args := []string{"osd", "lspools"} - output, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, errors.Wrap(err, "failed to list pools") - } - - var pools []CephStoragePoolSummary - err = json.Unmarshal(output, &pools) - if err != nil { - return nil, errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(output)) - } - - return pools, nil -} - -func GetPoolNamesByID(context *clusterd.Context, clusterInfo *ClusterInfo) (map[int]string, error) { - pools, err := ListPoolSummaries(context, clusterInfo) - if err != nil { - return nil, errors.Wrap(err, "failed to list pools") - } - names := map[int]string{} - for _, p := range pools { - names[p.Number] = p.Name - } - return names, nil -} - -// GetPoolDetails gets all the details of a given pool -func GetPoolDetails(context *clusterd.Context, clusterInfo *ClusterInfo, name string) (CephStoragePoolDetails, 
error) { - args := []string{"osd", "pool", "get", name, "all"} - output, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return CephStoragePoolDetails{}, errors.Wrapf(err, "failed to get pool %s details. %s", name, string(output)) - } - - // The response for osd pool get when passing var=all is actually malformed JSON similar to: - // {"pool":"rbd","size":1}{"pool":"rbd","min_size":2}... - // Note the multiple top level entities, one for each property returned. To workaround this, - // we split the JSON response string into its top level entities, then iterate through them, cleaning - // up the JSON. A single pool details object is repeatedly used to unmarshal each JSON snippet into. - // Since previously set fields remain intact if they are not overwritten, the result is the JSON - // unmarshalling of all properties in the response. - var poolDetails CephStoragePoolDetails - poolDetailsUnits := strings.Split(string(output), "}{") - for i := range poolDetailsUnits { - pdu := poolDetailsUnits[i] - if !strings.HasPrefix(pdu, "{") { - pdu = "{" + pdu - } - if !strings.HasSuffix(pdu, "}") { - pdu += "}" - } - err := json.Unmarshal([]byte(pdu), &poolDetails) - if err != nil { - return CephStoragePoolDetails{}, errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(output)) - } - } - - return poolDetails, nil -} - -func CreatePoolWithProfile(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, poolName string, pool cephv1.PoolSpec, appName string) error { - if pool.IsReplicated() { - return CreateReplicatedPoolForApp(context, clusterInfo, clusterSpec, poolName, pool, DefaultPGCount, appName) - } - - if !pool.IsErasureCoded() { - // neither a replicated or EC pool - return fmt.Errorf("pool %q type is not defined as replicated or erasure coded", poolName) - } - - // create a new erasure code profile for the new pool - ecProfileName := GetErasureCodeProfileForPool(poolName) - if err := CreateErasureCodeProfile(context, clusterInfo, ecProfileName, pool); err != nil { - return errors.Wrapf(err, "failed to create erasure code profile for pool %q", poolName) - } - - // If the pool is not a replicated pool, then the only other option is an erasure coded pool. 
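// The comment above notes that `osd pool get <name> all` returns concatenated
// JSON objects ({"pool":"rbd","size":1}{"pool":"rbd","min_size":2}...), which
// GetPoolDetails handles by splitting on "}{" and re-adding braces. A
// standalone sketch of an alternative approach: stream the same concatenated
// output through json.Decoder and decode each object into one struct, so
// fields accumulate across objects. The trimmed poolDetails struct and the
// sample output are illustrative only.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
)

type poolDetails struct {
	Name    string `json:"pool"`
	Size    uint   `json:"size"`
	MinSize uint   `json:"min_size"`
}

func main() {
	// Concatenated objects as produced by `osd pool get <name> all`.
	raw := `{"pool":"rbd","size":3}{"pool":"rbd","min_size":2}`

	var details poolDetails
	dec := json.NewDecoder(strings.NewReader(raw))
	for dec.More() {
		// Each Decode overwrites only the fields present in that object,
		// so previously decoded fields are preserved.
		if err := dec.Decode(&details); err != nil {
			log.Fatalf("failed to decode pool details: %v", err)
		}
	}
	fmt.Printf("pool=%s size=%d min_size=%d\n", details.Name, details.Size, details.MinSize)
}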
- return CreateECPoolForApp( - context, - clusterInfo, - poolName, - ecProfileName, - pool, - DefaultPGCount, - appName, - true /* enableECOverwrite */) -} - -func checkForImagesInPool(context *clusterd.Context, clusterInfo *ClusterInfo, name string) error { - var err error - logger.Debugf("checking any images/snapshosts present in pool %q", name) - stats, err := GetPoolStatistics(context, clusterInfo, name) - if err != nil { - if strings.Contains(err.Error(), "No such file or directory") { - return nil - } - return errors.Wrapf(err, "failed to list images/snapshosts in pool %s", name) - } - if stats.Images.Count == 0 && stats.Images.SnapCount == 0 { - logger.Infof("no images/snapshosts present in pool %q", name) - return nil - } - - return errors.Errorf("pool %q contains images/snapshosts", name) -} - -// DeletePool purges a pool from Ceph -func DeletePool(context *clusterd.Context, clusterInfo *ClusterInfo, name string) error { - // check if the pool exists - pool, err := GetPoolDetails(context, clusterInfo, name) - if err != nil { - return errors.Wrapf(err, "failed to get pool %q details", name) - } - - err = checkForImagesInPool(context, clusterInfo, name) - if err != nil { - return errors.Wrapf(err, "failed to check if pool %q has rbd images", name) - } - - logger.Infof("purging pool %q (id=%d)", name, pool.Number) - args := []string{"osd", "pool", "delete", name, name, reallyConfirmFlag} - _, err = NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to delete pool %q", name) - } - - // remove the crush rule for this pool and ignore the error in case the rule is still in use or not found - args = []string{"osd", "crush", "rule", "rm", name} - _, err = NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - logger.Errorf("failed to delete crush rule %q. %v", name, err) - } - - logger.Infof("purge completed for pool %q", name) - return nil -} - -func givePoolAppTag(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, appName string) error { - args := []string{"osd", "pool", "application", "enable", poolName, appName, confirmFlag} - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to enable application %s on pool %s", appName, poolName) - } - - return nil -} - -func setCommonPoolProperties(context *clusterd.Context, clusterInfo *ClusterInfo, pool cephv1.PoolSpec, poolName, appName string) error { - if len(pool.Parameters) == 0 { - pool.Parameters = make(map[string]string) - } - - if pool.Replicated.IsTargetRatioEnabled() { - pool.Parameters[targetSizeRatioProperty] = strconv.FormatFloat(pool.Replicated.TargetSizeRatio, 'f', -1, 32) - } - - if pool.IsCompressionEnabled() { - pool.Parameters[compressionModeProperty] = pool.CompressionMode - } - - // Apply properties - for propName, propValue := range pool.Parameters { - err := SetPoolProperty(context, clusterInfo, poolName, propName, propValue) - if err != nil { - logger.Errorf("failed to set property %q to pool %q to %q. 
%v", propName, poolName, propValue, err) - } - } - - // ensure that the newly created pool gets an application tag - if appName != "" { - err := givePoolAppTag(context, clusterInfo, poolName, appName) - if err != nil { - return errors.Wrapf(err, "failed to tag pool %q for application %q", poolName, appName) - } - } - - // If the pool is mirrored, let's enable mirroring - // we don't need to check if the pool is erasure coded or not, mirroring will still work, it will simply be slow - if pool.Mirroring.Enabled { - err := enablePoolMirroring(context, clusterInfo, pool, poolName) - if err != nil { - return errors.Wrapf(err, "failed to enable mirroring for pool %q", poolName) - } - - // Schedule snapshots - if pool.Mirroring.SnapshotSchedulesEnabled() && clusterInfo.CephVersion.IsAtLeastOctopus() { - err = enableSnapshotSchedules(context, clusterInfo, pool, poolName) - if err != nil { - return errors.Wrapf(err, "failed to enable snapshot scheduling for pool %q", poolName) - } - } - } else { - if pool.Mirroring.Mode == "pool" { - // Remove storage cluster peers - mirrorInfo, err := GetPoolMirroringInfo(context, clusterInfo, poolName) - if err != nil { - return errors.Wrapf(err, "failed to get mirroring info for the pool %q", poolName) - } - for _, peer := range mirrorInfo.Peers { - if peer.UUID != "" { - err := removeClusterPeer(context, clusterInfo, poolName, peer.UUID) - if err != nil { - return errors.Wrapf(err, "failed to remove cluster peer with UUID %q for the pool %q", peer.UUID, poolName) - } - } - } - - // Disable mirroring - err = disablePoolMirroring(context, clusterInfo, poolName) - if err != nil { - return errors.Wrapf(err, "failed to disable mirroring for pool %q", poolName) - } - } else if pool.Mirroring.Mode == "image" { - logger.Warningf("manually disable mirroring on images in the pool %q", poolName) - } - } - - // set maxSize quota - if pool.Quotas.MaxSize != nil { - // check for format errors - maxBytesQuota, err := resource.ParseQuantity(*pool.Quotas.MaxSize) - if err != nil { - if err == resource.ErrFormatWrong { - return errors.Wrapf(err, "maxSize quota incorrectly formatted for pool %q, valid units include k, M, G, T, P, E, Ki, Mi, Gi, Ti, Pi, Ei", poolName) - } - return errors.Wrapf(err, "failed setting quota for pool %q, maxSize quota parse error", poolName) - } - // set max_bytes quota, 0 value disables quota - err = setPoolQuota(context, clusterInfo, poolName, "max_bytes", strconv.FormatInt(maxBytesQuota.Value(), 10)) - if err != nil { - return errors.Wrapf(err, "failed to set max_bytes quota for pool %q", poolName) - } - } else if pool.Quotas.MaxBytes != nil { - // set max_bytes quota, 0 value disables quota - err := setPoolQuota(context, clusterInfo, poolName, "max_bytes", strconv.FormatUint(*pool.Quotas.MaxBytes, 10)) - if err != nil { - return errors.Wrapf(err, "failed to set max_bytes quota for pool %q", poolName) - } - } - // set max_objects quota - if pool.Quotas.MaxObjects != nil { - // set max_objects quota, 0 value disables quota - err := setPoolQuota(context, clusterInfo, poolName, "max_objects", strconv.FormatUint(*pool.Quotas.MaxObjects, 10)) - if err != nil { - return errors.Wrapf(err, "failed to set max_objects quota for pool %q", poolName) - } - } - - return nil -} - -func GetErasureCodeProfileForPool(baseName string) string { - return fmt.Sprintf("%s_ecprofile", baseName) -} - -func CreateECPoolForApp(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, ecProfileName string, pool cephv1.PoolSpec, pgCount, appName string, enableECOverwrite 
bool) error { - args := []string{"osd", "pool", "create", poolName, pgCount, "erasure", ecProfileName} - output, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to create EC pool %s. %s", poolName, string(output)) - } - - if enableECOverwrite { - if err = SetPoolProperty(context, clusterInfo, poolName, "allow_ec_overwrites", "true"); err != nil { - return errors.Wrapf(err, "failed to allow EC overwrite for pool %s", poolName) - } - } - - if err = setCommonPoolProperties(context, clusterInfo, pool, poolName, appName); err != nil { - return err - } - - logger.Infof("creating EC pool %s succeeded", poolName) - return nil -} - -func CreateReplicatedPoolForApp(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, poolName string, pool cephv1.PoolSpec, pgCount, appName string) error { - // The crush rule name is the same as the pool unless we have a stretch cluster. - crushRuleName := poolName - if clusterSpec.IsStretchCluster() { - // A stretch cluster enforces using the same crush rule for all pools. - // The stretch cluster rule is created initially by the operator when the stretch cluster is configured - // so there is no need to create a new crush rule for the pools here. - crushRuleName = defaultStretchCrushRuleName - } else if pool.IsHybridStoragePool() { - // Create hybrid crush rule - err := createHybridCrushRule(context, clusterInfo, clusterSpec, crushRuleName, pool) - if err != nil { - return errors.Wrapf(err, "failed to create hybrid crush rule %q", crushRuleName) - } - } else { - if pool.Replicated.ReplicasPerFailureDomain > 1 { - // Create a two-step CRUSH rule for pools other than stretch clusters - err := createStretchCrushRule(context, clusterInfo, clusterSpec, crushRuleName, pool) - if err != nil { - return errors.Wrapf(err, "failed to create two-step crush rule %q", crushRuleName) - } - } else { - // create a crush rule for a replicated pool, if a failure domain is specified - if err := createReplicationCrushRule(context, clusterInfo, clusterSpec, crushRuleName, pool); err != nil { - return errors.Wrapf(err, "failed to create replicated crush rule %q", crushRuleName) - } - } - } - - args := []string{"osd", "pool", "create", poolName, pgCount, "replicated", crushRuleName, "--size", strconv.FormatUint(uint64(pool.Replicated.Size), 10)} - output, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to create replicated pool %s. 
%s", poolName, string(output)) - } - - if !clusterSpec.IsStretchCluster() { - // the pool is type replicated, set the size for the pool now that it's been created - if err := SetPoolReplicatedSizeProperty(context, clusterInfo, poolName, strconv.FormatUint(uint64(pool.Replicated.Size), 10)); err != nil { - return errors.Wrapf(err, "failed to set size property to replicated pool %q to %d", poolName, pool.Replicated.Size) - } - } - - if err = setCommonPoolProperties(context, clusterInfo, pool, poolName, appName); err != nil { - return err - } - - logger.Infof("creating replicated pool %s succeeded", poolName) - return nil -} - -func createStretchCrushRule(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, ruleName string, pool cephv1.PoolSpec) error { - // set the crush root to the default if not already specified - if pool.CrushRoot == "" { - pool.CrushRoot = GetCrushRootFromSpec(clusterSpec) - } - - // set the crush failure domain to the "host" if not already specified - if pool.FailureDomain == "" { - pool.FailureDomain = cephv1.DefaultFailureDomain - } - - // set the crush failure sub domain to the "host" if not already specified - if pool.Replicated.SubFailureDomain == "" { - pool.Replicated.SubFailureDomain = cephv1.DefaultFailureDomain - } - - if pool.FailureDomain == pool.Replicated.SubFailureDomain { - return errors.Errorf("failure and subfailure domains cannot be identical, current is %q", pool.FailureDomain) - } - - crushMap, err := getCurrentCrushMap(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get current crush map") - } - - if crushRuleExists(crushMap, ruleName) { - logger.Debugf("CRUSH rule %q already exists", ruleName) - return nil - } - - // Build plain text rule - ruleset := buildTwoStepPlainCrushRule(crushMap, ruleName, pool) - - return updateCrushMap(context, clusterInfo, ruleset) -} - -func createHybridCrushRule(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, ruleName string, pool cephv1.PoolSpec) error { - // set the crush root to the default if not already specified - if pool.CrushRoot == "" { - pool.CrushRoot = GetCrushRootFromSpec(clusterSpec) - } - - // set the crush failure domain to the "host" if not already specified - if pool.FailureDomain == "" { - pool.FailureDomain = cephv1.DefaultFailureDomain - } - - crushMap, err := getCurrentCrushMap(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get current crush map") - } - - if crushRuleExists(crushMap, ruleName) { - logger.Debugf("CRUSH rule %q already exists", ruleName) - return nil - } - - ruleset := buildTwoStepHybridCrushRule(crushMap, ruleName, pool) - - return updateCrushMap(context, clusterInfo, ruleset) -} - -func updateCrushMap(context *clusterd.Context, clusterInfo *ClusterInfo, ruleset string) error { - - // Fetch the compiled crush map - compiledCRUSHMapFilePath, err := GetCompiledCrushMap(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get crush map") - } - defer func() { - err := os.Remove(compiledCRUSHMapFilePath) - if err != nil { - logger.Errorf("failed to remove file %q. 
%v", compiledCRUSHMapFilePath, err) - } - }() - - // Decompile the plain text to CRUSH binary format - err = decompileCRUSHMap(context, compiledCRUSHMapFilePath) - if err != nil { - return errors.Wrap(err, "failed to compile crush map") - } - decompiledCRUSHMapFilePath := buildDecompileCRUSHFileName(compiledCRUSHMapFilePath) - defer func() { - err := os.Remove(decompiledCRUSHMapFilePath) - if err != nil { - logger.Errorf("failed to remove file %q. %v", decompiledCRUSHMapFilePath, err) - } - }() - - // Append plain rule to the decompiled crush map - f, err := os.OpenFile(filepath.Clean(decompiledCRUSHMapFilePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0400) - if err != nil { - return errors.Wrapf(err, "failed to open decompiled crush map %q", decompiledCRUSHMapFilePath) - } - defer func() { - err := f.Close() - if err != nil { - logger.Errorf("failed to close file %q. %v", f.Name(), err) - } - }() - - // Append the new crush rule into the crush map - if _, err := f.WriteString(ruleset); err != nil { - return errors.Wrapf(err, "failed to append replicated plain crush rule to decompiled crush map %q", decompiledCRUSHMapFilePath) - } - - // Compile the plain text to CRUSH binary format - err = compileCRUSHMap(context, decompiledCRUSHMapFilePath) - if err != nil { - return errors.Wrap(err, "failed to compile crush map") - } - defer func() { - err := os.Remove(buildCompileCRUSHFileName(decompiledCRUSHMapFilePath)) - if err != nil { - logger.Errorf("failed to remove file %q. %v", buildCompileCRUSHFileName(decompiledCRUSHMapFilePath), err) - } - }() - - // Inject the new CRUSH Map - err = injectCRUSHMap(context, clusterInfo, buildCompileCRUSHFileName(decompiledCRUSHMapFilePath)) - if err != nil { - return errors.Wrap(err, "failed to inject crush map") - } - - return nil -} - -func createReplicationCrushRule(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, ruleName string, pool cephv1.PoolSpec) error { - failureDomain := pool.FailureDomain - if failureDomain == "" { - failureDomain = cephv1.DefaultFailureDomain - } - // set the crush root to the default if not already specified - crushRoot := pool.CrushRoot - if pool.CrushRoot == "" { - crushRoot = GetCrushRootFromSpec(clusterSpec) - } - - args := []string{"osd", "crush", "rule", "create-replicated", ruleName, crushRoot, failureDomain} - - var deviceClass string - if pool.DeviceClass != "" { - deviceClass = pool.DeviceClass - args = append(args, deviceClass) - } - - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to create crush rule %s", ruleName) - } - - return nil -} - -// SetPoolProperty sets a property to a given pool -func SetPoolProperty(context *clusterd.Context, clusterInfo *ClusterInfo, name, propName, propVal string) error { - args := []string{"osd", "pool", "set", name, propName, propVal} - logger.Infof("setting pool property %q to %q on pool %q", propName, propVal, name) - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to set pool property %q on pool %q", propName, name) - } - return nil -} - -// setPoolQuota sets quotas on a given pool -func setPoolQuota(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, quotaType, quotaVal string) error { - args := []string{"osd", "pool", "set-quota", poolName, quotaType, quotaVal} - logger.Infof("setting quota %q=%q on pool %q", quotaType, quotaVal, poolName) - _, err := NewCephCommand(context, clusterInfo, args).Run() - if 
err != nil { - return errors.Wrapf(err, "failed to set %q quota on pool %q", quotaType, poolName) - } - return nil -} - -// SetPoolReplicatedSizeProperty sets the replica size of a pool -func SetPoolReplicatedSizeProperty(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, size string) error { - propName := "size" - args := []string{"osd", "pool", "set", poolName, propName, size} - if size == "1" { - args = append(args, "--yes-i-really-mean-it") - } - - _, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to set pool property %q on pool %q", propName, poolName) - } - - return nil -} - -func GetPoolStats(context *clusterd.Context, clusterInfo *ClusterInfo) (*CephStoragePoolStats, error) { - args := []string{"df", "detail"} - output, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return nil, errors.Wrap(err, "failed to get pool stats") - } - - var poolStats CephStoragePoolStats - if err := json.Unmarshal(output, &poolStats); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal pool stats response") - } - - return &poolStats, nil -} - -func GetPoolStatistics(context *clusterd.Context, clusterInfo *ClusterInfo, name string) (*PoolStatistics, error) { - args := []string{"pool", "stats", name} - cmd := NewRBDCommand(context, clusterInfo, args) - cmd.JsonOutput = true - output, err := cmd.Run() - if err != nil { - return nil, errors.Wrap(err, "failed to get pool stats") - } - - var poolStats PoolStatistics - if err := json.Unmarshal(output, &poolStats); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal pool stats response") - } - - return &poolStats, nil -} - -func crushRuleExists(crushMap CrushMap, ruleName string) bool { - // Check if the crush rule already exists - for _, rule := range crushMap.Rules { - if rule.Name == ruleName { - return true - } - } - - return false -} - -func getCurrentCrushMap(context *clusterd.Context, clusterInfo *ClusterInfo) (CrushMap, error) { - crushMap, err := GetCrushMap(context, clusterInfo) - if err != nil { - return CrushMap{}, errors.Wrap(err, "failed to get crush map") - } - - return crushMap, nil -} diff --git a/pkg/daemon/ceph/client/pool_test.go b/pkg/daemon/ceph/client/pool_test.go deleted file mode 100644 index 0ed868830..000000000 --- a/pkg/daemon/ceph/client/pool_test.go +++ /dev/null @@ -1,452 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package client - -import ( - "os/exec" - "reflect" - "strconv" - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestCreateECPoolWithOverwrites(t *testing.T) { - testCreateECPool(t, true, "") -} - -func TestCreateECPoolWithoutOverwrites(t *testing.T) { - testCreateECPool(t, false, "") -} - -func TestCreateECPoolWithCompression(t *testing.T) { - testCreateECPool(t, false, "aggressive") - testCreateECPool(t, true, "none") -} - -func testCreateECPool(t *testing.T, overwrite bool, compressionMode string) { - poolName := "mypool" - compressionModeCreated := false - p := cephv1.PoolSpec{ - FailureDomain: "host", - ErasureCoded: cephv1.ErasureCodedSpec{}, - } - if compressionMode != "" { - p.CompressionMode = compressionMode - } - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "pool" { - if args[2] == "create" { - assert.Equal(t, "mypool", args[3]) - assert.Equal(t, "erasure", args[5]) - assert.Equal(t, "mypoolprofile", args[6]) - return "", nil - } - if args[2] == "set" { - assert.Equal(t, "mypool", args[3]) - if args[4] == "allow_ec_overwrites" { - assert.Equal(t, true, overwrite) - assert.Equal(t, "true", args[5]) - return "", nil - } - if args[4] == "compression_mode" { - assert.Equal(t, compressionMode, args[5]) - compressionModeCreated = true - return "", nil - } - } - if args[2] == "application" { - assert.Equal(t, "enable", args[3]) - assert.Equal(t, "mypool", args[4]) - assert.Equal(t, "myapp", args[5]) - return "", nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err := CreateECPoolForApp(context, AdminClusterInfo("mycluster"), poolName, "mypoolprofile", p, DefaultPGCount, "myapp", overwrite) - assert.Nil(t, err) - if compressionMode != "" { - assert.True(t, compressionModeCreated) - } else { - assert.False(t, compressionModeCreated) - } -} - -func TestCreateReplicaPoolWithFailureDomain(t *testing.T) { - testCreateReplicaPool(t, "osd", "mycrushroot", "", "") -} - -func TestCreateReplicaPoolWithDeviceClass(t *testing.T) { - testCreateReplicaPool(t, "osd", "mycrushroot", "hdd", "") -} - -func TestCreateReplicaPoolWithCompression(t *testing.T) { - testCreateReplicaPool(t, "osd", "mycrushroot", "hdd", "passive") - testCreateReplicaPool(t, "osd", "mycrushroot", "hdd", "force") -} - -func testCreateReplicaPool(t *testing.T, failureDomain, crushRoot, deviceClass, compressionMode string) { - crushRuleCreated := false - compressionModeCreated := false - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "pool" { - if args[2] == "create" { - assert.Equal(t, "mypool", args[3]) - assert.Equal(t, "replicated", args[5]) - assert.Equal(t, "--size", args[7]) - assert.Equal(t, "12345", args[8]) - return "", nil - } - if args[2] == "set" { - assert.Equal(t, "mypool", args[3]) - if args[4] == "size" { - assert.Equal(t, "12345", args[5]) - } - if args[4] == "compression_mode" { - assert.Equal(t, compressionMode, args[5]) - compressionModeCreated = true - } - return "", nil - } - if args[2] 
== "application" { - assert.Equal(t, "enable", args[3]) - assert.Equal(t, "mypool", args[4]) - assert.Equal(t, "myapp", args[5]) - return "", nil - } - } - if args[1] == "crush" { - crushRuleCreated = true - assert.Equal(t, "rule", args[2]) - assert.Equal(t, "create-replicated", args[3]) - assert.Equal(t, "mypool", args[4]) - if crushRoot == "" { - assert.Equal(t, "cluster-crush-root", args[5]) - } else { - assert.Equal(t, crushRoot, args[5]) - } - if failureDomain == "" { - assert.Equal(t, "host", args[6]) - } else { - assert.Equal(t, failureDomain, args[6]) - } - if deviceClass == "" { - assert.False(t, testIsStringInSlice("hdd", args)) - } else { - assert.Equal(t, deviceClass, args[7]) - } - return "", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - p := cephv1.PoolSpec{ - FailureDomain: failureDomain, CrushRoot: crushRoot, DeviceClass: deviceClass, - Replicated: cephv1.ReplicatedSpec{Size: 12345}, - } - if compressionMode != "" { - p.CompressionMode = compressionMode - } - clusterSpec := &cephv1.ClusterSpec{Storage: cephv1.StorageScopeSpec{Config: map[string]string{CrushRootConfigKey: "cluster-crush-root"}}} - err := CreateReplicatedPoolForApp(context, AdminClusterInfo("mycluster"), clusterSpec, "mypool", p, DefaultPGCount, "myapp") - assert.Nil(t, err) - assert.True(t, crushRuleCreated) - if compressionMode != "" { - assert.True(t, compressionModeCreated) - } else { - assert.False(t, compressionModeCreated) - } -} - -func testIsStringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -func TestGetPoolStatistics(t *testing.T) { - p := PoolStatistics{} - p.Images.Count = 1 - p.Images.ProvisionedBytes = 1024 - p.Images.SnapCount = 1 - p.Trash.Count = 1 - p.Trash.ProvisionedBytes = 2048 - p.Trash.SnapCount = 0 - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - a := "{\"images\":{\"count\":1,\"provisioned_bytes\":1024,\"snap_count\":1},\"trash\":{\"count\":1,\"provisioned_bytes\":2048,\"snap_count\":0}}" - logger.Infof("Command: %s %v", command, args) - - if args[0] == "pool" { - if args[1] == "stats" { - if args[2] == "replicapool" { - return a, nil - } - return "", errors.Errorf("rbd:error opening pool '%s': (2) No such file or directory", args[3]) - - } - } - return "", errors.Errorf("unexpected rbd command %q", args) - } - - clusterInfo := AdminClusterInfo("mycluster") - stats, err := GetPoolStatistics(context, clusterInfo, "replicapool") - assert.Nil(t, err) - assert.True(t, reflect.DeepEqual(stats, &p)) - - stats, err = GetPoolStatistics(context, clusterInfo, "rbd") - assert.NotNil(t, err) - assert.Nil(t, stats) -} - -func TestSetPoolReplicatedSizeProperty(t *testing.T) { - poolName := "mypool" - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - - if args[2] == "set" { - assert.Equal(t, poolName, args[3]) - assert.Equal(t, "size", args[4]) - assert.Equal(t, "3", args[5]) - return "", nil - } - - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err := SetPoolReplicatedSizeProperty(context, AdminClusterInfo("mycluster"), poolName, "3") - assert.NoError(t, err) - - // TEST POOL SIZE 1 AND RequireSafeReplicaSize True - 
executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - - if args[2] == "set" { - assert.Equal(t, "mypool", args[3]) - assert.Equal(t, "size", args[4]) - assert.Equal(t, "1", args[5]) - assert.Equal(t, "--yes-i-really-mean-it", args[6]) - return "", nil - } - - return "", errors.Errorf("unexpected ceph command %q", args) - } - - err = SetPoolReplicatedSizeProperty(context, AdminClusterInfo("mycluster"), poolName, "1") - assert.NoError(t, err) -} - -func TestCreateStretchCrushRule(t *testing.T) { - testCreateStretchCrushRule(t, true) - testCreateStretchCrushRule(t, false) -} - -func testCreateStretchCrushRule(t *testing.T, alreadyExists bool) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" { - if args[1] == "getcrushmap" { - return "", nil - } - if args[1] == "setcrushmap" { - if alreadyExists { - return "", errors.New("setcrushmap not expected for already existing crush rule") - } - return "", nil - } - } - if command == "crushtool" { - switch { - case args[0] == "--decompile" || args[0] == "--compile": - if alreadyExists { - return "", errors.New("--compile or --decompile not expected for already existing crush rule") - } - return "", nil - } - } - if args[0] == "osd" && args[1] == "crush" && args[2] == "dump" { - return testCrushMap, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - clusterInfo := AdminClusterInfo("mycluster") - clusterSpec := &cephv1.ClusterSpec{} - poolSpec := cephv1.PoolSpec{FailureDomain: "rack"} - ruleName := "testrule" - if alreadyExists { - ruleName = "replicated_ruleset" - } - - err := createStretchCrushRule(context, clusterInfo, clusterSpec, ruleName, poolSpec) - assert.NoError(t, err) -} - -func TestCreatePoolWithReplicasPerFailureDomain(t *testing.T) { - // This test goes via the path of explicit compile/decompile CRUSH map; ignored if 'crushtool' is not installed - // on local build machine - if hasCrushtool() { - testCreatePoolWithReplicasPerFailureDomain(t, "host", "mycrushroot", "hdd") - testCreatePoolWithReplicasPerFailureDomain(t, "rack", "mycrushroot", "ssd") - } -} - -func testCreatePoolWithReplicasPerFailureDomain(t *testing.T, failureDomain, crushRoot, deviceClass string) { - poolName := "mypool-with-two-step-clush-rule" - poolRuleCreated := false - poolRuleSet := false - poolAppEnable := false - poolSpec := cephv1.PoolSpec{ - FailureDomain: failureDomain, - CrushRoot: crushRoot, - DeviceClass: deviceClass, - Replicated: cephv1.ReplicatedSpec{ - Size: 12345678, - ReplicasPerFailureDomain: 2, - }, - } - - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - assert.Equal(t, command, "ceph") - assert.Equal(t, args[0], "osd") - if len(args) >= 3 && args[1] == "crush" && args[2] == "dump" { - return testCrushMap, nil - } - if len(args) >= 3 && args[1] == "pool" && args[2] == "create" { - // Currently, CRUSH-rule name equals pool's name - assert.GreaterOrEqual(t, len(args), 7) - assert.Equal(t, args[3], poolName) - assert.Equal(t, args[5], "replicated") - crushRuleName := args[6] - assert.Equal(t, crushRuleName, poolName) - poolRuleCreated = true - return "", nil - } - if len(args) >= 3 && args[1] == "pool" 
&& args[2] == "set" { - crushRuleName := args[3] - assert.Equal(t, crushRuleName, poolName) - assert.Equal(t, args[4], "size") - poolSize, err := strconv.Atoi(args[5]) - assert.NoError(t, err) - assert.Equal(t, uint(poolSize), poolSpec.Replicated.Size) - poolRuleSet = true - return "", nil - } - if len(args) >= 4 && args[1] == "pool" && args[2] == "application" && args[3] == "enable" { - crushRuleName := args[4] - assert.Equal(t, crushRuleName, poolName) - poolAppEnable = true - return "", nil - } - if len(args) >= 4 && args[1] == "crush" && args[2] == "rule" && args[3] == "create-replicated" { - crushRuleName := args[4] - assert.Equal(t, crushRuleName, poolName) - deviceClassName := args[7] - assert.Equal(t, deviceClassName, deviceClass) - poolRuleCreated = true - return "", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - context := &clusterd.Context{Executor: executor} - clusterSpec := &cephv1.ClusterSpec{Storage: cephv1.StorageScopeSpec{Config: map[string]string{CrushRootConfigKey: "cluster-crush-root"}}} - err := CreateReplicatedPoolForApp(context, AdminClusterInfo("mycluster"), clusterSpec, poolName, poolSpec, DefaultPGCount, "myapp") - assert.Nil(t, err) - assert.True(t, poolRuleCreated) - assert.True(t, poolRuleSet) - assert.True(t, poolAppEnable) -} - -func TestCreateHybridCrushRule(t *testing.T) { - testCreateHybridCrushRule(t, true) - testCreateHybridCrushRule(t, false) -} - -func testCreateHybridCrushRule(t *testing.T, alreadyExists bool) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" { - if args[1] == "getcrushmap" { - return "", nil - } - if args[1] == "setcrushmap" { - if alreadyExists { - return "", errors.New("setcrushmap not expected for already existing crush rule") - } - return "", nil - } - } - if command == "crushtool" { - switch { - case args[0] == "--decompile" || args[0] == "--compile": - if alreadyExists { - return "", errors.New("--compile or --decompile not expected for already existing crush rule") - } - return "", nil - } - } - if args[0] == "osd" && args[1] == "crush" && args[2] == "dump" { - return testCrushMap, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - clusterInfo := AdminClusterInfo("mycluster") - clusterSpec := &cephv1.ClusterSpec{} - poolSpec := cephv1.PoolSpec{ - FailureDomain: "rack", - Replicated: cephv1.ReplicatedSpec{ - HybridStorage: &cephv1.HybridStorageSpec{ - PrimaryDeviceClass: "ssd", - SecondaryDeviceClass: "hdd", - }, - }, - } - ruleName := "testrule" - if alreadyExists { - ruleName = "hybrid_ruleset" - } - - err := createHybridCrushRule(context, clusterInfo, clusterSpec, ruleName, poolSpec) - assert.NoError(t, err) -} - -func hasCrushtool() bool { - _, err := exec.LookPath("crushtool") - return err == nil -} diff --git a/pkg/daemon/ceph/client/status.go b/pkg/daemon/ceph/client/status.go deleted file mode 100644 index 7cae34c6e..000000000 --- a/pkg/daemon/ceph/client/status.go +++ /dev/null @@ -1,308 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package client - -import ( - "encoding/json" - "fmt" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" -) - -const ( - // CephHealthOK denotes the status of ceph cluster when healthy. - CephHealthOK = "HEALTH_OK" - - // CephHealthWarn denotes the status of ceph cluster when unhealthy but recovering. - CephHealthWarn = "HEALTH_WARN" - - // CephHealthErr denotes the status of ceph cluster when unhealthy but usually needs - // manual intervention. - CephHealthErr = "HEALTH_ERR" -) - -const ( - activeClean = "active+clean" - activeCleanScrubbing = "active+clean+scrubbing" - activeCleanScrubbingDeep = "active+clean+scrubbing+deep" -) - -type CephStatus struct { - Health HealthStatus `json:"health"` - FSID string `json:"fsid"` - ElectionEpoch int `json:"election_epoch"` - Quorum []int `json:"quorum"` - QuorumNames []string `json:"quorum_names"` - MonMap MonMap `json:"monmap"` - OsdMap struct { - OsdMap OsdMap `json:"osdmap"` - } `json:"osdmap"` - PgMap PgMap `json:"pgmap"` - MgrMap MgrMap `json:"mgrmap"` - Fsmap Fsmap `json:"fsmap"` -} - -type HealthStatus struct { - Status string `json:"status"` - Checks map[string]CheckMessage `json:"checks"` -} - -type CheckMessage struct { - Severity string `json:"severity"` - Summary Summary `json:"summary"` -} - -type Summary struct { - Message string `json:"message"` -} - -type MonMap struct { - Epoch int `json:"epoch"` - FSID string `json:"fsid"` - CreatedTime string `json:"created"` - ModifiedTime string `json:"modified"` - Mons []MonMapEntry `json:"mons"` -} - -type MgrStat struct { - Epoch int `json:"epoch"` - Available bool `json:"available"` - ActiveName string `json:"active_name"` - NumStandby int `json:"num_standby"` -} - -type MgrMap struct { - Epoch int `json:"epoch"` - ActiveGID int `json:"active_gid"` - ActiveName string `json:"active_name"` - ActiveAddr string `json:"active_addr"` - Available bool `json:"available"` - Standbys []MgrStandby `json:"standbys"` -} - -type MgrStandby struct { - GID int `json:"gid"` - Name string `json:"name"` -} - -type OsdMap struct { - Epoch int `json:"epoch"` - NumOsd int `json:"num_osds"` - NumUpOsd int `json:"num_up_osds"` - NumInOsd int `json:"num_in_osds"` - Full bool `json:"full"` - NearFull bool `json:"nearfull"` - NumRemappedPgs int `json:"num_remapped_pgs"` -} - -type PgMap struct { - PgsByState []PgStateEntry `json:"pgs_by_state"` - Version int `json:"version"` - NumPgs int `json:"num_pgs"` - DataBytes uint64 `json:"data_bytes"` - UsedBytes uint64 `json:"bytes_used"` - AvailableBytes uint64 `json:"bytes_avail"` - TotalBytes uint64 `json:"bytes_total"` - ReadBps uint64 `json:"read_bytes_sec"` - WriteBps uint64 `json:"write_bytes_sec"` - ReadOps uint64 `json:"read_op_per_sec"` - WriteOps uint64 `json:"write_op_per_sec"` - RecoveryBps uint64 `json:"recovering_bytes_per_sec"` - RecoveryObjectsPerSec uint64 `json:"recovering_objects_per_sec"` - RecoveryKeysPerSec uint64 `json:"recovering_keys_per_sec"` - CacheFlushBps uint64 `json:"flush_bytes_sec"` - CacheEvictBps uint64 `json:"evict_bytes_sec"` - CachePromoteBps uint64 `json:"promote_op_per_sec"` -} - -type PgStateEntry struct { - StateName 
string `json:"state_name"` - Count int `json:"count"` -} - -// Fsmap is a struct representing the filesystem map -type Fsmap struct { - Epoch int `json:"epoch"` - ID int `json:"id"` - Up int `json:"up"` - In int `json:"in"` - Max int `json:"max"` - ByRank []struct { - FilesystemID int `json:"filesystem_id"` - Rank int `json:"rank"` - Name string `json:"name"` - Status string `json:"status"` - Gid int `json:"gid"` - } `json:"by_rank"` - UpStandby int `json:"up:standby"` -} - -func Status(context *clusterd.Context, clusterInfo *ClusterInfo) (CephStatus, error) { - args := []string{"status"} - cmd := NewCephCommand(context, clusterInfo, args) - buf, err := cmd.Run() - if err != nil { - return CephStatus{}, errors.Wrapf(err, "failed to get status. %s", string(buf)) - } - - var status CephStatus - if err := json.Unmarshal(buf, &status); err != nil { - return CephStatus{}, errors.Wrap(err, "failed to unmarshal status response") - } - - return status, nil -} - -func StatusWithUser(context *clusterd.Context, clusterInfo *ClusterInfo) (CephStatus, error) { - args := []string{"status", "--format", "json"} - command, args := FinalizeCephCommandArgs("ceph", clusterInfo, args, context.ConfigDir) - - buf, err := context.Executor.ExecuteCommandWithOutput(command, args...) - if err != nil { - if buf != "" { - return CephStatus{}, errors.Wrapf(err, "failed to get status. %s", string(buf)) - } - return CephStatus{}, errors.Wrap(err, "failed to get ceph status") - } - - var status CephStatus - if err := json.Unmarshal([]byte(buf), &status); err != nil { - return CephStatus{}, errors.Wrap(err, "failed to unmarshal status response") - } - - return status, nil -} - -// IsClusterClean returns msg (string), clean (bool), err (error) -// msg describes the state of the PGs -// clean is true if the cluster is clean -// err is not nil if getting the status failed. -func IsClusterClean(context *clusterd.Context, clusterInfo *ClusterInfo) (string, bool, error) { - status, err := Status(context, clusterInfo) - if err != nil { - return "unable to get PG health", false, err - } - msg, clean := isClusterClean(status) - if !clean { - return msg, false, nil - } - return msg, true, nil -} - -// IsClusterCleanError returns an error indicating if the cluster is fully clean yet (i.e., all placement -// groups are in the active+clean state). It returns nil if the cluster is clean. -// Using IsClusterClean is recommended if you want to differentiate between a failure of the status query and -// an unclean cluster. -func IsClusterCleanError(context *clusterd.Context, clusterInfo *ClusterInfo) error { - msg, clean, err := IsClusterClean(context, clusterInfo) - if err != nil { - return err - } - if !clean { - return errors.New(msg) - } - return nil -} - -func isClusterClean(status CephStatus) (string, bool) { - if status.PgMap.NumPgs == 0 { - // there are no PGs yet, that still counts as clean - return "cluster has no PGs", true - } - - cleanPGs := 0 - for _, pg := range status.PgMap.PgsByState { - if pg.StateName == activeClean || pg.StateName == activeCleanScrubbing || pg.StateName == activeCleanScrubbingDeep { - cleanPGs += pg.Count - } - } - if cleanPGs == status.PgMap.NumPgs { - // all PGs in the cluster are in a clean state - logger.Debugf("all placement groups have reached a clean state: %+v", status.PgMap.PgsByState) - return "all PGs in cluster are clean", true - } - - return fmt.Sprintf("cluster is not fully clean. 
PGs: %+v", status.PgMap.PgsByState), false -} - -// getMDSRank returns the rank of a given MDS -func getMDSRank(status CephStatus, fsName string) (int, error) { - // dummy rank - mdsRank := -1000 - for r := range status.Fsmap.ByRank { - if status.Fsmap.ByRank[r].Name == fsName { - mdsRank = r - } - } - // if the mds is not shown in the map one reason might be because it's in standby - // if not in standby there is something else going wrong - if mdsRank < 0 && status.Fsmap.UpStandby < 1 { - // it might seem strange to log an error since this could be a warning too - // it is a warning until we reach the timeout, this should give enough time to the mds to transition its state - // after the timeout we consider that the mds might be gone or the timeout was not long enough... - return mdsRank, errors.Errorf("mds %s not found in fsmap, this likely means mdss are transitioning between active and standby states", fsName) - } - - return mdsRank, nil -} - -// MdsActiveOrStandbyReplay returns whether a given MDS is active or in standby -func MdsActiveOrStandbyReplay(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string) error { - status, err := Status(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get ceph status") - } - - mdsRank, err := getMDSRank(status, fsName) - if err != nil { - return errors.Cause(err) - } - - // this MDS is in standby so let's return immediately - if mdsRank < 0 { - logger.Infof("mds %s is in standby, nothing to check", fsName) - return nil - } - - if status.Fsmap.ByRank[mdsRank].Status == "up:active" || status.Fsmap.ByRank[mdsRank].Status == "up:standby-replay" || status.Fsmap.ByRank[mdsRank].Status == "up:standby" { - logger.Infof("mds %s is %s", fsName, status.Fsmap.ByRank[mdsRank].Status) - return nil - } - - return errors.Errorf("mds %s is %s, bad state", fsName, status.Fsmap.ByRank[mdsRank].Status) -} - -// IsCephHealthy verifies Ceph is healthy, useful when performing an upgrade -// check if it's a minor or major upgrade... too! -func IsCephHealthy(context *clusterd.Context, clusterInfo *ClusterInfo) bool { - cephStatus, err := Status(context, clusterInfo) - if err != nil { - logger.Errorf("failed to detect if Ceph is healthy. failed to get ceph status. %v", err) - return false - } - - return isCephHealthy(cephStatus) -} - -func isCephHealthy(status CephStatus) bool { - s := status.Health.Status - if s == "HEALTH_WARN" || s == "HEALTH_OK" { - return true - } - - return false -} diff --git a/pkg/daemon/ceph/client/status_test.go b/pkg/daemon/ceph/client/status_test.go deleted file mode 100644 index 5da72e2ce..000000000 --- a/pkg/daemon/ceph/client/status_test.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package client - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -const ( - // this JSON was generated from `ceph status -f json`, using Ceph Luminous 12.1.3 - CephStatusResponseRaw = `{"fsid":"613975f3-3025-4802-9de1-a2280b950e75","health":{"checks":{"OSD_DOWN":{"severity":"HEALTH_WARN","summary":{"message":"1 osds down"}},"OSD_HOST_DOWN":{"severity":"HEALTH_WARN","summary":{"message":"1 host (1 osds) down"}},"PG_AVAILABILITY":{"severity":"HEALTH_WARN","summary":{"message":"Reduced data availability: 101 pgs stale"}},"POOL_APP_NOT_ENABLED":{"severity":"HEALTH_WARN","summary":{"message":"application not enabled on 1 pool(s)"}}},"status":"HEALTH_WARN","overall_status":"HEALTH_WARN"},"election_epoch":12,"quorum":[0,1,2],"quorum_names":["rook-ceph-mon0","rook-ceph-mon2","rook-ceph-mon1"],"monmap":{"epoch":3,"fsid":"613975f3-3025-4802-9de1-a2280b950e75","modified":"2017-08-11 20:13:02.075679","created":"2017-08-11 20:12:35.314510","features":{"persistent":["kraken","luminous"],"optional":[]},"mons":[{"rank":0,"name":"rook-ceph-mon0","addr":"10.3.0.45:6789/0","public_addr":"10.3.0.45:6789/0"},{"rank":1,"name":"rook-ceph-mon2","addr":"10.3.0.249:6789/0","public_addr":"10.3.0.249:6789/0"},{"rank":2,"name":"rook-ceph-mon1","addr":"10.3.0.252:6789/0","public_addr":"10.3.0.252:6789/0"}]},"osdmap":{"osdmap":{"epoch":17,"num_osds":2,"num_up_osds":1,"num_in_osds":2,"full":false,"nearfull":true,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"stale+active+clean","count":101},{"state_name":"active+clean","count":99}],"num_pgs":200,"num_pools":2,"num_objects":243,"data_bytes":976793635,"bytes_used":13611479040,"bytes_avail":19825307648,"bytes_total":33436786688},"fsmap":{"epoch":1,"by_rank":[]},"mgrmap":{"epoch":3,"active_gid":14111,"active_name":"rook-ceph-mgr0","active_addr":"10.2.73.6:6800/9","available":true,"standbys":[],"modules":["restful","status"],"available_modules":["dashboard","prometheus","restful","status","zabbix"]},"servicemap":{"epoch":1,"modified":"0.000000","services":{}}}` -) - -var ( - // this JSON was generated from `ceph status -f json`, using Ceph Nautilus 14.2.1 - // It was chopped to only show what the tests are looking for - statusFakeRaw = []byte(`{ - "fsmap": { - "epoch": 13, - "id": 1, - "up": 1, - "in": 1, - "max": 1, - "by_rank": [ - { - "filesystem_id": 1, - "rank": 0, - "name": "myfs-b", - "status": "up:active", - "gid": 14716 - } - ], - "up:standby": 1 - } - }`) -) - -func TestStatusMarshal(t *testing.T) { - var status CephStatus - err := json.Unmarshal([]byte(CephStatusResponseRaw), &status) - assert.Nil(t, err) - - // verify some health fields - assert.Equal(t, "HEALTH_WARN", status.Health.Status) - assert.Equal(t, 4, len(status.Health.Checks)) - assert.Equal(t, "HEALTH_WARN", status.Health.Checks["OSD_DOWN"].Severity) - assert.Equal(t, "1 osds down", status.Health.Checks["OSD_DOWN"].Summary.Message) - assert.Equal(t, "HEALTH_WARN", status.Health.Checks["OSD_HOST_DOWN"].Severity) - assert.Equal(t, "1 host (1 osds) down", status.Health.Checks["OSD_HOST_DOWN"].Summary.Message) - - // verify some Mon map fields - assert.Equal(t, 3, status.MonMap.Epoch) - assert.Equal(t, "rook-ceph-mon0", status.MonMap.Mons[0].Name) - assert.Equal(t, "10.3.0.45:6789/0", status.MonMap.Mons[0].Address) - - // verify some OSD map fields - assert.Equal(t, 2, status.OsdMap.OsdMap.NumOsd) - assert.Equal(t, 1, status.OsdMap.OsdMap.NumUpOsd) - assert.False(t, status.OsdMap.OsdMap.Full) - assert.True(t, status.OsdMap.OsdMap.NearFull) - - // 
verify some PG map fields - assert.Equal(t, 200, status.PgMap.NumPgs) - assert.Equal(t, uint64(13611479040), status.PgMap.UsedBytes) - assert.Equal(t, 101, status.PgMap.PgsByState[0].Count) - assert.Equal(t, "stale+active+clean", status.PgMap.PgsByState[0].StateName) -} - -func TestIsClusterClean(t *testing.T) { - status := CephStatus{ - PgMap: PgMap{ - PgsByState: []PgStateEntry{ - {StateName: activeClean, Count: 3}, - }, - NumPgs: 10, - }, - } - - // not a clean cluster with PGs not adding up - _, clean := isClusterClean(status) - assert.False(t, clean) - - // clean cluster - status.PgMap.PgsByState = append(status.PgMap.PgsByState, - PgStateEntry{StateName: activeCleanScrubbing, Count: 5}) - status.PgMap.PgsByState = append(status.PgMap.PgsByState, - PgStateEntry{StateName: activeCleanScrubbingDeep, Count: 2}) - _, clean = isClusterClean(status) - assert.True(t, clean) - - // not a clean cluster with PGs in a bad state - status.PgMap.PgsByState[0].StateName = "notclean" - _, clean = isClusterClean(status) - assert.False(t, clean) -} - -func TestGetMDSRank(t *testing.T) { - var statusFake CephStatus - err := json.Unmarshal(statusFakeRaw, &statusFake) - assert.NoError(t, err) - - mdsRankFake, err := getMDSRank(statusFake, "myfs-b") - assert.Nil(t, err) - assert.Equal(t, 0, mdsRankFake) -} - -func TestIsCephHealthy(t *testing.T) { - var statusFake CephStatus - err := json.Unmarshal(statusFakeRaw, &statusFake) - assert.NoError(t, err) - - statusFake.Health.Status = "HEALTH_WARN" - s := isCephHealthy(statusFake) - assert.True(t, s) - - statusFake.Health.Status = "HEALTH_OK" - s = isCephHealthy(statusFake) - assert.True(t, s) - - statusFake.Health.Status = "HEALTH_ERR" - s = isCephHealthy(statusFake) - assert.False(t, s) -} diff --git a/pkg/daemon/ceph/client/test/info.go b/pkg/daemon/ceph/client/test/info.go deleted file mode 100644 index 6efd21917..000000000 --- a/pkg/daemon/ceph/client/test/info.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package test - -import ( - "fmt" - "io/ioutil" - "os" - "path" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/daemon/ceph/client" -) - -func CreateConfigDir(configDir string) error { - if err := os.MkdirAll(configDir, 0700); err != nil { - return errors.Wrap(err, "error while creating directory") - } - if err := ioutil.WriteFile(path.Join(configDir, "client.admin.keyring"), []byte("key = adminsecret"), 0600); err != nil { - return errors.Wrap(err, "admin writefile error") - } - if err := ioutil.WriteFile(path.Join(configDir, "mon.keyring"), []byte("key = monsecret"), 0600); err != nil { - return errors.Wrap(err, "mon writefile error") - } - return nil -} - -// CreateTestClusterInfo creates a test cluster info -// This would be best in a test package, but is included here to avoid cyclic dependencies -func CreateTestClusterInfo(monCount int) *client.ClusterInfo { - ownerInfo := client.NewMinimumOwnerInfoWithOwnerRef() - c := &client.ClusterInfo{ - FSID: "12345", - Namespace: "default", - MonitorSecret: "monsecret", - CephCred: client.CephCred{ - Username: client.AdminUsername, - Secret: "adminkey", - }, - Monitors: map[string]*client.MonInfo{}, - OwnerInfo: ownerInfo, - } - mons := []string{"a", "b", "c", "d", "e"} - for i := 0; i < monCount; i++ { - id := mons[i] - c.Monitors[id] = &client.MonInfo{ - Name: id, - Endpoint: fmt.Sprintf("1.2.3.%d:6789", (i + 1)), - } - } - c.SetName(c.Namespace) - return c -} diff --git a/pkg/daemon/ceph/client/test/mon.go b/pkg/daemon/ceph/client/test/mon.go deleted file mode 100644 index 8f7e5d4d3..000000000 --- a/pkg/daemon/ceph/client/test/mon.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package test - -import ( - "encoding/json" - "fmt" - - "github.com/rook/rook/pkg/daemon/ceph/client" -) - -func MonInQuorumResponse() string { - resp := client.MonStatusResponse{Quorum: []int{0}} - resp.MonMap.Mons = []client.MonMapEntry{ - { - Name: "a", - Rank: 0, - Address: "1.2.3.1", - }, - } - serialized, _ := json.Marshal(resp) - return string(serialized) -} - -func MonInQuorumResponseFromMons(mons map[string]*client.MonInfo) string { - resp := client.MonStatusResponse{Quorum: []int{}} - i := 0 - for name := range mons { - resp.MonMap.Mons = append(resp.MonMap.Mons, client.MonMapEntry{ - Name: name, - Rank: i, - Address: fmt.Sprintf("1.2.3.%d", i), - }) - resp.Quorum = append(resp.Quorum, i) - i++ - } - serialized, _ := json.Marshal(resp) - return string(serialized) -} - -func MonInQuorumResponseMany(count int) string { - resp := client.MonStatusResponse{Quorum: []int{0}} - resp.MonMap.Mons = []client.MonMapEntry{} - for i := 0; i <= count; i++ { - resp.MonMap.Mons = append(resp.MonMap.Mons, client.MonMapEntry{ - Name: fmt.Sprintf("rook-ceph-mon%d", i), - Rank: 0, - Address: fmt.Sprintf("1.2.3.%d", i), - }) - } - serialized, _ := json.Marshal(resp) - return string(serialized) -} diff --git a/pkg/daemon/ceph/client/upgrade.go b/pkg/daemon/ceph/client/upgrade.go deleted file mode 100644 index 5fee97e7b..000000000 --- a/pkg/daemon/ceph/client/upgrade.go +++ /dev/null @@ -1,442 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "encoding/json" - "strings" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/util" -) - -const ( - defaultMaxRetries = 10 - defaultRetryDelay = 60 * time.Second - defaultOSDRetryDelay = 10 * time.Second -) - -var ( - // we don't perform any checks on these daemons - // they don't have any "ok-to-stop" command implemented - daemonNoCheck = []string{"mgr", "rgw", "rbd-mirror", "nfs", "fs-mirror"} - errNoHostInCRUSH = errors.New("no host in crush map yet?") -) - -func getCephMonVersionString(context *clusterd.Context, clusterInfo *ClusterInfo) (string, error) { - args := []string{"version"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return "", errors.Wrap(err, "failed to run 'ceph version'") - } - output := string(buf) - logger.Debug(output) - - return output, nil -} - -func getAllCephDaemonVersionsString(context *clusterd.Context, clusterInfo *ClusterInfo) (string, error) { - args := []string{"versions"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return "", errors.Wrapf(err, "failed to run 'ceph versions'. 
%s", string(buf)) - } - output := string(buf) - logger.Debug(output) - - return output, nil -} - -// GetCephMonVersion reports the Ceph version of all the monitors, or at least a majority with quorum -func GetCephMonVersion(context *clusterd.Context, clusterInfo *ClusterInfo) (*cephver.CephVersion, error) { - output, err := getCephMonVersionString(context, clusterInfo) - if err != nil { - return nil, err - } - logger.Debug(output) - - v, err := cephver.ExtractCephVersion(output) - if err != nil { - return nil, errors.Wrap(err, "failed to extract ceph version") - } - - return v, nil -} - -// GetAllCephDaemonVersions reports the Ceph version of each daemon in the cluster -func GetAllCephDaemonVersions(context *clusterd.Context, clusterInfo *ClusterInfo) (*cephv1.CephDaemonsVersions, error) { - output, err := getAllCephDaemonVersionsString(context, clusterInfo) - if err != nil { - return nil, err - } - logger.Debug(output) - - var cephVersionsResult cephv1.CephDaemonsVersions - err = json.Unmarshal([]byte(output), &cephVersionsResult) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve ceph versions results") - } - - return &cephVersionsResult, nil -} - -// EnableMessenger2 enable the messenger 2 protocol on Nautilus clusters -func EnableMessenger2(context *clusterd.Context, clusterInfo *ClusterInfo) error { - args := []string{"mon", "enable-msgr2"} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrap(err, "failed to enable msgr2 protocol") - } - output := string(buf) - logger.Debug(output) - logger.Infof("successfully enabled msgr2 protocol") - - return nil -} - -// EnableReleaseOSDFunctionality disallows pre-Nautilus OSDs and enables all new Nautilus-only functionality -func EnableReleaseOSDFunctionality(context *clusterd.Context, clusterInfo *ClusterInfo, release string) error { - args := []string{"osd", "require-osd-release", release} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "failed to disallow pre-%s osds and enable all new %s-only functionality", release, release) - } - output := string(buf) - logger.Debug(output) - logger.Infof("successfully disallowed pre-%s osds and enabled all new %s-only functionality", release, release) - - return nil -} - -// OkToStop determines if it's ok to stop an upgrade -func OkToStop(context *clusterd.Context, clusterInfo *ClusterInfo, deployment, daemonType, daemonName string) error { - okToStopRetries, okToStopDelay := getRetryConfig(clusterInfo, daemonType) - versions, err := GetAllCephDaemonVersions(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get ceph daemons versions") - } - - switch daemonType { - // Trying to handle the case where a **single** mon is deployed and an upgrade is called - case "mon": - // if len(versions.Mon) > 1, this means we have different Ceph versions for some monitor(s). 
- // This is fine, we can run the upgrade checks - if len(versions.Mon) == 1 { - // now trying to parse and find how many mons are present - // if we have less than 3 mons we skip the check and do best-effort - // we do less than 3 because during the initial bootstrap the mon sequence is updated too - // so running the check on 2/3 mon fails - // versions.Mon looks like this map[ceph version 15.0.0-12-g6c8fb92 (6c8fb920cb1d862f36ee852ed849a15f9a50bd68) octopus (dev):1] - // now looping over a single element since we can't address the key directly (we don't know its name) - for _, monCount := range versions.Mon { - if monCount < 3 { - logger.Infof("the cluster has fewer than 3 monitors, not performing upgrade check, running in best-effort") - return nil - } - } - } - // Trying to handle the case where a **single** osd is deployed and an upgrade is called - case "osd": - if osdDoNothing(context, clusterInfo) { - return nil - } - } - - // we don't implement any checks for mon, rgw and rbdmirror since: - // - mon: this is done in the monitor code since it ensures all the mons are always in quorum before continuing - // - rgw: the pod spec has a liveness probe, so a pod that successfully starts is considered healthy - // - rbdmirror: you can chain as many as you want like mdss but there is no ok-to-stop logic yet - err = util.Retry(okToStopRetries, okToStopDelay, func() error { - return okToStopDaemon(context, clusterInfo, deployment, daemonType, daemonName) - }) - if err != nil { - return errors.Wrapf(err, "failed to check if %s was ok to stop", deployment) - } - - return nil -} - -// OkToContinue determines if it's ok to continue an upgrade -func OkToContinue(context *clusterd.Context, clusterInfo *ClusterInfo, deployment, daemonType, daemonName string) error { - // the mon case is handled directly in the deployment where the mon checks for quorum - switch daemonType { - case "mds": - err := okToContinueMDSDaemon(context, clusterInfo, deployment, daemonType, daemonName) - if err != nil { - return errors.Wrapf(err, "failed to check if %s was ok to continue", deployment) - } - } - - return nil -} - -func okToStopDaemon(context *clusterd.Context, clusterInfo *ClusterInfo, deployment, daemonType, daemonName string) error { - if !StringInSlice(daemonType, daemonNoCheck) { - args := []string{daemonType, "ok-to-stop", daemonName} - buf, err := NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return errors.Wrapf(err, "deployment %s cannot be stopped", deployment) - } - output := string(buf) - logger.Debugf("deployment %s is ok to be updated. 
%s", deployment, output) - } - - // At this point, we can't tell if the daemon is unknown or if - // but it's not a problem since perhaps it has no "ok-to-stop" call - // It's fine to return nil here - logger.Debugf("deployment %s is ok to be updated.", deployment) - - return nil -} - -// okToContinueMDSDaemon determines whether it's fine to go to the next mds during an upgrade -// mostly a placeholder function for the future but since we have standby mds this shouldn't be needed -func okToContinueMDSDaemon(context *clusterd.Context, clusterInfo *ClusterInfo, deployment, daemonType, daemonName string) error { - // wait for the MDS to be active again or in standby-replay - retries, delay := getRetryConfig(clusterInfo, "mds") - err := util.Retry(retries, delay, func() error { - return MdsActiveOrStandbyReplay(context, clusterInfo, findFSName(deployment)) - }) - if err != nil { - return err - } - - return nil -} - -// StringInSlice return whether an element is in a slice -func StringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// LeastUptodateDaemonVersion returns the ceph version of the least updated daemon type -// So if we invoke this method function with "mon", it will look for the least recent version -// Assume the following: -// -// "mon": { -// "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 2, -// "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 1 -// } -// -// In the case we will pick: "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 2, -// And eventually return 16.2.5 -func LeastUptodateDaemonVersion(context *clusterd.Context, clusterInfo *ClusterInfo, daemonType string) (cephver.CephVersion, error) { - var r map[string]int - var vv cephver.CephVersion - - // Always invoke ceph version before an upgrade so we are sure to be up-to-date - versions, err := GetAllCephDaemonVersions(context, clusterInfo) - if err != nil { - return vv, errors.Wrap(err, "failed to get ceph daemons versions") - } - - r, err = daemonMapEntry(versions, daemonType) - if err != nil { - return vv, errors.Wrap(err, "failed to find daemon map entry") - } - for v := range r { - version, err := cephver.ExtractCephVersion(v) - if err != nil { - return vv, errors.Wrap(err, "failed to extract ceph version") - } - vv = *version - // break right after the first iteration - // the first one is always the least up-to-date - break - } - - return vv, nil -} - -func findFSName(deployment string) string { - return strings.TrimPrefix(deployment, "rook-ceph-mds-") -} - -func daemonMapEntry(versions *cephv1.CephDaemonsVersions, daemonType string) (map[string]int, error) { - switch daemonType { - case "mon": - return versions.Mon, nil - case "mgr": - return versions.Mgr, nil - case "mds": - return versions.Mds, nil - case "osd": - return versions.Osd, nil - case "rgw": - return versions.Rgw, nil - case "mirror": - return versions.RbdMirror, nil - } - - return nil, errors.Errorf("invalid daemonType %s", daemonType) -} - -func allOSDsSameHost(context *clusterd.Context, clusterInfo *ClusterInfo) (bool, error) { - tree, err := HostTree(context, clusterInfo) - if err != nil { - return false, errors.Wrap(err, "failed to get the osd tree") - } - - osds, err := OsdListNum(context, clusterInfo) - if err != nil { - return false, errors.Wrap(err, "failed to get the osd list") - } - - hostOsdTree, err := buildHostListFromTree(tree) - if err != nil { - return 
false, errors.Wrap(err, "failed to build osd tree") - } - - hostOsdNodes := len(hostOsdTree.Nodes) - if hostOsdNodes == 0 { - return false, errNoHostInCRUSH - } - - // If the number of OSD node is 1, chances are this is simple setup with all OSDs on it - if hostOsdNodes == 1 { - // number of OSDs on that host - hostOsdNum := len(hostOsdTree.Nodes[0].Children) - // we take the total number of OSDs and remove the OSDs that are out of the CRUSH map - osdUp := len(osds) - len(tree.Stray) - // If the number of children of that host (basically OSDs) is equal to the total number of OSDs - // We can assume that all OSDs are running on the same machine - if hostOsdNum == osdUp { - return true, nil - } - } - - return false, nil -} - -func buildHostListFromTree(tree OsdTree) (OsdTree, error) { - var osdList OsdTree - - if tree.Nodes == nil { - return osdList, errors.New("osd tree not populated, missing 'nodes' field") - } - - for _, t := range tree.Nodes { - if t.Type == "host" { - osdList.Nodes = append(osdList.Nodes, t) - } - } - - return osdList, nil -} - -// OSDUpdateShouldCheckOkToStop returns true if Rook should check ok-to-stop for OSDs when doing -// OSD daemon updates. It will return false if it should not perform ok-to-stop checks, for example, -// when there are fewer than 3 OSDs -func OSDUpdateShouldCheckOkToStop(context *clusterd.Context, clusterInfo *ClusterInfo) bool { - userIntervention := "the user will likely need to set continueUpgradeAfterChecksEvenIfNotHealthy to allow OSD updates to proceed" - - osds, err := OsdListNum(context, clusterInfo) - if err != nil { - // If calling osd list fails, we assume there are more than 3 OSDs and we check if ok-to-stop - // If there are less than 3 OSDs, the ok-to-stop call will fail - // this can still be controlled by setting continueUpgradeAfterChecksEvenIfNotHealthy - // At least this will happen for a single OSD only, which means 2 OSDs will restart in a small interval - logger.Warningf("failed to determine the total number of osds. will check if OSDs are ok-to-stop. if there are fewer than 3 OSDs %s. %v", userIntervention, err) - return true - } - if len(osds) < 3 { - logger.Warningf("the cluster has fewer than 3 osds. not performing upgrade check. running in best-effort") - return false - } - - // aio means all in one - aio, err := allOSDsSameHost(context, clusterInfo) - if err != nil { - if errors.Is(err, errNoHostInCRUSH) { - logger.Warning("the CRUSH map has no 'host' entries so not performing ok-to-stop checks") - return false - } - logger.Warningf("failed to determine if all osds are running on the same host. will check if OSDs are ok-to-stop. if all OSDs are running on one host %s. %v", userIntervention, err) - return true - } - if aio { - logger.Warningf("all OSDs are running on the same host. not performing upgrade check. running in best-effort") - return false - } - - return true -} - -// osdDoNothing determines whether we should perform upgrade pre-check and post-checks for the OSD daemon -// it checks for various cluster info like number of OSD and their placement -// it returns 'true' if we need to do nothing and false and we should pre-check/post-check -func osdDoNothing(context *clusterd.Context, clusterInfo *ClusterInfo) bool { - osds, err := OsdListNum(context, clusterInfo) - if err != nil { - logger.Warningf("failed to determine the total number of osds. will check if the osd is ok-to-stop anyways. 
%v", err) - // If calling osd list fails, we assume there are more than 3 OSDs and we check if ok-to-stop - // If there are less than 3 OSDs, the ok-to-stop call will fail - // this can still be controlled by setting continueUpgradeAfterChecksEvenIfNotHealthy - // At least this will happen for a single OSD only, which means 2 OSDs will restart in a small interval - return false - } - if len(osds) < 3 { - logger.Warningf("the cluster has fewer than 3 osds, not performing upgrade check, running in best-effort") - return true - } - - // aio means all in one - aio, err := allOSDsSameHost(context, clusterInfo) - if err != nil { - // We return true so that we can continue without a retry and subsequently not test if the - // osd can be stopped This handles the scenario where the OSDs have been created but not yet - // started due to a wrong CR configuration For instance, when OSDs are encrypted and Vault - // is used to store encryption keys, if the KV version is incorrect during the cluster - // initialization the OSDs will fail to start and stay in CLBO until the CR is updated again - // with the correct KV version so that it can start For this scenario we don't need to go - // through the path where the check whether the OSD can be stopped or not, so it will always - // fail and make us wait for nothing - if errors.Is(err, errNoHostInCRUSH) { - logger.Warning("the CRUSH map has no 'host' entries so not performing ok-to-stop checks") - return true - } - logger.Warningf("failed to determine if all osds are running on the same host, performing upgrade check anyways. %v", err) - return false - } - - if aio { - logger.Warningf("all OSDs are running on the same host, not performing upgrade check, running in best-effort") - return true - } - - return false -} - -func getRetryConfig(clusterInfo *ClusterInfo, daemonType string) (int, time.Duration) { - switch daemonType { - case "osd": - return int(clusterInfo.OsdUpgradeTimeout / defaultOSDRetryDelay), defaultOSDRetryDelay - case "mds": - return defaultMaxRetries, 15 * time.Second - } - - return defaultMaxRetries, defaultRetryDelay -} diff --git a/pkg/daemon/ceph/client/upgrade_test.go b/pkg/daemon/ceph/client/upgrade_test.go deleted file mode 100644 index 2035d84bb..000000000 --- a/pkg/daemon/ceph/client/upgrade_test.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "encoding/json" - "testing" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client/fake" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestGetCephMonVersionString(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - assert.Equal(t, "version", args[0]) - return "", nil - } - context := &clusterd.Context{Executor: executor} - - _, err := getCephMonVersionString(context, AdminClusterInfo("mycluster")) - assert.NoError(t, err) -} - -func TestGetCephMonVersionsString(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - assert.Equal(t, "versions", args[0]) - return "", nil - } - context := &clusterd.Context{Executor: executor} - - _, err := getAllCephDaemonVersionsString(context, AdminClusterInfo("mycluster")) - assert.Nil(t, err) -} - -func TestEnableMessenger2(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - assert.Equal(t, "mon", args[0]) - assert.Equal(t, "enable-msgr2", args[1]) - return "", nil - } - context := &clusterd.Context{Executor: executor} - - err := EnableMessenger2(context, AdminClusterInfo("mycluster")) - assert.NoError(t, err) -} - -func TestEnableReleaseOSDFunctionality(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - assert.Equal(t, "osd", args[0]) - assert.Equal(t, "require-osd-release", args[1]) - return "", nil - } - context := &clusterd.Context{Executor: executor} - - err := EnableReleaseOSDFunctionality(context, AdminClusterInfo("mycluster"), "nautilus") - assert.NoError(t, err) -} - -func TestOkToStopDaemon(t *testing.T) { - // First test - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - switch { - case args[0] == "mon" && args[1] == "ok-to-stop" && args[2] == "a": - return "", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - context := &clusterd.Context{Executor: executor} - - deployment := "rook-ceph-mon-a" - err := okToStopDaemon(context, AdminClusterInfo("mycluster"), deployment, "mon", "a") - assert.NoError(t, err) - - // Second test - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - assert.Equal(t, "mgr", args[0]) - assert.Equal(t, "ok-to-stop", args[1]) - assert.Equal(t, "a", args[2]) - return "", nil - } - context = &clusterd.Context{Executor: executor} - - deployment = "rook-ceph-mgr-a" - err = okToStopDaemon(context, AdminClusterInfo("mycluster"), deployment, "mgr", "a") - assert.NoError(t, err) - - // Third test - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - assert.Equal(t, "dummy", args[0]) - assert.Equal(t, "ok-to-stop", args[1]) - assert.Equal(t, "a", args[2]) - return "", nil - } - context = &clusterd.Context{Executor: executor} - - deployment = "rook-ceph-dummy-a" - err = okToStopDaemon(context, AdminClusterInfo("mycluster"), deployment, "dummy", "a") - assert.NoError(t, err) -} - -func TestOkToContinue(t *testing.T) { - 
executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - - err := OkToContinue(context, AdminClusterInfo("mycluster"), "rook-ceph-mon-a", "mon", "a") // mon is not checked on ok-to-continue so nil is expected - assert.NoError(t, err) -} - -func TestFindFSName(t *testing.T) { - fsName := findFSName("rook-ceph-mds-myfs-a") - assert.Equal(t, "myfs-a", fsName) - fsName = findFSName("rook-ceph-mds-my-super-fs-a") - assert.Equal(t, "my-super-fs-a", fsName) -} - -func TestDaemonMapEntry(t *testing.T) { - dummyVersionsRaw := []byte(` - { - "mon": { - "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 1, - "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2 - } - }`) - - var dummyVersions cephv1.CephDaemonsVersions - err := json.Unmarshal([]byte(dummyVersionsRaw), &dummyVersions) - assert.NoError(t, err) - - m, err := daemonMapEntry(&dummyVersions, "mon") - assert.NoError(t, err) - assert.Equal(t, dummyVersions.Mon, m) - - _, err = daemonMapEntry(&dummyVersions, "dummy") - assert.Error(t, err) -} - -func TestBuildHostListFromTree(t *testing.T) { - dummyOsdTreeRaw := []byte(` - { - "nodes": [ - { - "id": -3, - "name": "r1", - "type": "rack", - "type_id": 3, - "children": [ - -4 - ] - }, - { - "id": -4, - "name": "ceph-nano-oooooo", - "type": "host", - "type_id": 1, - "pool_weights": {}, - "children": [ - 0 - ] - }, - { - "id": 0, - "name": "osd.0", - "type": "osd", - "type_id": 0, - "crush_weight": 0.009796, - "depth": 2, - "pool_weights": {}, - "exists": 1, - "status": "up", - "reweight": 1, - "primary_affinity": 1 - }, - { - "id": -1, - "name": "default", - "type": "root", - "type_id": 10, - "children": [ - -2 - ] - }, - { - "id": -2, - "name": "ceph-nano-nau-faa32aebf00b", - "type": "host", - "type_id": 1, - "pool_weights": {}, - "children": [] - } - ], - "stray": [ - { - "id": 1, - "name": "osd.1", - "type": "osd", - "type_id": 0, - "crush_weight": 0, - "depth": 0, - "exists": 1, - "status": "down", - "reweight": 0, - "primary_affinity": 1 - } - ] - }`) - - var dummyTree OsdTree - err := json.Unmarshal([]byte(dummyOsdTreeRaw), &dummyTree) - assert.NoError(t, err) - - osdHosts, err := buildHostListFromTree(dummyTree) - assert.NoError(t, err) - assert.Equal(t, 2, len(osdHosts.Nodes)) - - dummyEmptyOsdTreeRaw := []byte(`{}`) - var dummyEmptyTree OsdTree - err = json.Unmarshal([]byte(dummyEmptyOsdTreeRaw), &dummyEmptyTree) - assert.NoError(t, err) - - _, err = buildHostListFromTree(dummyEmptyTree) - assert.Error(t, err) - - dummyEmptyNodeOsdTreeRaw := []byte(`{"nodes": []}`) - var dummyEmptyNodeTree OsdTree - err = json.Unmarshal([]byte(dummyEmptyNodeOsdTreeRaw), &dummyEmptyNodeTree) - assert.NoError(t, err) - - _, err = buildHostListFromTree(dummyEmptyNodeTree) - assert.NoError(t, err) -} - -func TestGetRetryConfig(t *testing.T) { - testcases := []struct { - label string - clusterInfo *ClusterInfo - daemonType string - expectedRetries int - expectedDelay time.Duration - }{ - { - label: "case 1: mon daemon", - clusterInfo: &ClusterInfo{}, - daemonType: "mon", - expectedRetries: 10, - expectedDelay: 60 * time.Second, - }, - { - label: "case 2: osd daemon with 5 minutes delay", - clusterInfo: &ClusterInfo{OsdUpgradeTimeout: 5 * time.Minute}, - daemonType: "osd", - expectedRetries: 30, - expectedDelay: 10 * time.Second, - }, - { - label: "case 3: osd daemon with 10 minutes delay", - clusterInfo: &ClusterInfo{OsdUpgradeTimeout: 10 * time.Minute}, - daemonType: "osd", - expectedRetries: 60, - expectedDelay: 
10 * time.Second, - }, - { - label: "case 4: mds daemon", - clusterInfo: &ClusterInfo{}, - daemonType: "mds", - expectedRetries: 10, - expectedDelay: 15 * time.Second, - }, - } - - for _, tc := range testcases { - actualRetries, actualDelay := getRetryConfig(tc.clusterInfo, tc.daemonType) - - assert.Equal(t, tc.expectedRetries, actualRetries, "[%s] failed to get correct retry count", tc.label) - assert.Equalf(t, tc.expectedDelay, actualDelay, "[%s] failed to get correct delays between retries", tc.label) - } -} - -func TestOSDUpdateShouldCheckOkToStop(t *testing.T) { - clusterInfo := &ClusterInfo{} - lsOutput := "" - treeOutput := "" - context := &clusterd.Context{ - Executor: &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - t.Logf("command: %s %v", command, args) - if command != "ceph" || args[0] != "osd" { - panic("not a 'ceph osd' call") - } - if args[1] == "tree" { - if treeOutput == "" { - return "", errors.Errorf("induced error") - } - return treeOutput, nil - } - if args[1] == "ls" { - if lsOutput == "" { - return "", errors.Errorf("induced error") - } - return lsOutput, nil - } - panic("do not understand command") - }, - }, - } - - t.Run("3 nodes with 1 OSD each", func(t *testing.T) { - lsOutput = fake.OsdLsOutput(3) - treeOutput = fake.OsdTreeOutput(3, 1) - assert.True(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) - }) - - t.Run("1 node with 3 OSDs", func(t *testing.T) { - lsOutput = fake.OsdLsOutput(3) - treeOutput = fake.OsdTreeOutput(1, 3) - assert.False(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) - }) - - t.Run("2 nodes with 1 OSD each", func(t *testing.T) { - lsOutput = fake.OsdLsOutput(2) - treeOutput = fake.OsdTreeOutput(2, 1) - assert.False(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) - }) - - t.Run("3 nodes with 3 OSDs each", func(t *testing.T) { - lsOutput = fake.OsdLsOutput(9) - treeOutput = fake.OsdTreeOutput(3, 3) - assert.True(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) - }) - - // degraded case but good to test just in case - t.Run("0 nodes", func(t *testing.T) { - lsOutput = fake.OsdLsOutput(0) - treeOutput = fake.OsdTreeOutput(0, 0) - assert.False(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) - }) - - // degraded case, OSDs are failing to start so they haven't registered in the CRUSH map yet - t.Run("0 nodes with down OSDs", func(t *testing.T) { - lsOutput = fake.OsdLsOutput(3) - treeOutput = fake.OsdTreeOutput(0, 1) - assert.False(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) - }) -} diff --git a/pkg/daemon/ceph/osd/agent.go b/pkg/daemon/ceph/osd/agent.go deleted file mode 100644 index eee03661d..000000000 --- a/pkg/daemon/ceph/osd/agent.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - "github.com/rook/rook/pkg/operator/k8sutil" -) - -const ( - unassignedOSDID = -1 -) - -// OsdAgent represents the OSD struct of an agent -type OsdAgent struct { - clusterInfo *cephclient.ClusterInfo - nodeName string - forceFormat bool - devices []DesiredDevice - metadataDevice string - storeConfig config.StoreConfig - kv *k8sutil.ConfigMapKVStore - pvcBacked bool -} - -// NewAgent is the instantiation of the OSD agent -func NewAgent(context *clusterd.Context, devices []DesiredDevice, metadataDevice string, forceFormat bool, - storeConfig config.StoreConfig, clusterInfo *cephclient.ClusterInfo, nodeName string, kv *k8sutil.ConfigMapKVStore, pvcBacked bool) *OsdAgent { - - return &OsdAgent{ - devices: devices, - metadataDevice: metadataDevice, - forceFormat: forceFormat, - storeConfig: storeConfig, - clusterInfo: clusterInfo, - nodeName: nodeName, - kv: kv, - pvcBacked: pvcBacked, - } -} - -func getDeviceLVPath(context *clusterd.Context, deviceName string) string { - output, err := context.Executor.ExecuteCommandWithOutput("pvdisplay", "-C", "-o", "lvpath", "--noheadings", deviceName) - if err != nil { - logger.Warningf("failed to retrieve logical volume path for %q. %v", deviceName, err) - return "" - } - logger.Debugf("logical volume path for device %q is %q", deviceName, output) - return output -} diff --git a/pkg/daemon/ceph/osd/daemon.go b/pkg/daemon/ceph/osd/daemon.go deleted file mode 100644 index 3eeabea34..000000000 --- a/pkg/daemon/ceph/osd/daemon.go +++ /dev/null @@ -1,509 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "fmt" - "os" - "os/signal" - "path/filepath" - "regexp" - "strings" - "syscall" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - "github.com/rook/rook/pkg/util/sys" -) - -const ( - pvcDataTypeDevice = "data" - pvcMetadataTypeDevice = "metadata" - pvcWalTypeDevice = "wal" - lvmCommandToCheck = "lvm" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephosd") -) - -// StartOSD starts an OSD on a device that was provisioned by ceph-volume -func StartOSD(context *clusterd.Context, osdType, osdID, osdUUID, lvPath string, pvcBackedOSD, lvBackedPV bool, cephArgs []string) error { - - // ensure the config mount point exists - configDir := fmt.Sprintf("/var/lib/ceph/osd/ceph-%s", osdID) - err := os.Mkdir(configDir, 0750) - if err != nil { - logger.Errorf("failed to create config dir %q. 
%v", configDir, err) - } - - // Update LVM config at runtime - if err := UpdateLVMConfig(context, pvcBackedOSD, lvBackedPV); err != nil { - return errors.Wrap(err, "failed to update lvm configuration file") // fail return here as validation provided by ceph-volume - } - - var volumeGroupName string - if pvcBackedOSD && !lvBackedPV { - volumeGroupName := getVolumeGroupName(lvPath) - if volumeGroupName == "" { - return errors.Wrapf(err, "error fetching volume group name for OSD %q", osdID) - } - - go handleTerminate(context, lvPath, volumeGroupName) - - // It's fine to continue if deactivate fails since we will return error if activate fails - if op, err := context.Executor.ExecuteCommandWithCombinedOutput("vgchange", "-an", "-vv", volumeGroupName); err != nil { - logger.Errorf("failed to deactivate volume group for lv %q. output: %s. %v", lvPath, op, err) - return nil - } - - if op, err := context.Executor.ExecuteCommandWithCombinedOutput("vgchange", "-ay", "-vv", volumeGroupName); err != nil { - return errors.Wrapf(err, "failed to activate volume group for lv %q. output: %s", lvPath, op) - } - } - - // activate the osd with ceph-volume - storeFlag := "--" + osdType - if err := context.Executor.ExecuteCommand("stdbuf", "-oL", "ceph-volume", "lvm", "activate", "--no-systemd", storeFlag, osdID, osdUUID); err != nil { - return errors.Wrap(err, "failed to activate osd") - } - - // run the ceph-osd daemon - if err := context.Executor.ExecuteCommand("ceph-osd", cephArgs...); err != nil { - // Instead of returning, we want to allow the lvm release to happen below, so we just log the err - logger.Errorf("failed to start osd or shutting down. %v", err) - } - - if pvcBackedOSD && !lvBackedPV { - if err := releaseLVMDevice(context, volumeGroupName); err != nil { - // Let's just report the error and not fail as a best-effort since some drivers will force detach anyway - // Failing to release the device does not means the detach will fail so let's proceed - logger.Errorf("failed to release device from lvm. %v", err) - return nil - } - } - - return nil -} - -func handleTerminate(context *clusterd.Context, lvPath, volumeGroupName string) { - sigc := make(chan os.Signal, 1) - signal.Notify(sigc, syscall.SIGTERM) - - <-sigc - logger.Infof("shutdown signal received, exiting...") - err := killCephOSDProcess(context, lvPath) - if err != nil { - logger.Errorf("failed to kill ceph-osd process. %v", err) - } -} - -func killCephOSDProcess(context *clusterd.Context, lvPath string) error { - - pid, err := context.Executor.ExecuteCommandWithOutput("fuser", "-a", lvPath) - if err != nil { - return errors.Wrapf(err, "failed to retrieve process ID for %q", lvPath) - } - - logger.Infof("process ID for ceph-osd: %s", pid) - - // shut down the osd-ceph process so that lvm release does not show device in use error. - if pid != "" { - // The OSD needs to exit as quickly as possible in order for the IO requests - // to be redirected to other OSDs in the cluster. The OSD is designed to tolerate failures - // of any kind, including power loss or kill -9. The upstream Ceph tests have for many years - // been testing with kill -9 so this is expected to be safe. There is a fix upstream Ceph that will - // improve the shutdown time of the OSD. For cleanliness we should consider removing the -9 - // once it is backported to Nautilus: https://github.com/ceph/ceph/pull/31677. 
- if err := context.Executor.ExecuteCommand("kill", "-9", pid); err != nil { - return errors.Wrap(err, "failed to kill ceph-osd process") - } - } - - return nil -} - -func configRawDevice(name string, context *clusterd.Context) (*sys.LocalDisk, error) { - rawDevice, err := clusterd.PopulateDeviceInfo(name, context.Executor) - if err != nil { - return nil, errors.Wrapf(err, "failed to get device info for %q", name) - } - - // set the device type: data, block_db(metadata) or wal. - if strings.HasPrefix(name, "/mnt") { - rawDevice, err = clusterd.PopulateDeviceUdevInfo(rawDevice.KernelName, context.Executor, rawDevice) - if err != nil { - logger.Warningf("failed to get udev info for device %q. %v", name, err) - } - rawDevice.Type = pvcDataTypeDevice - } else if strings.HasPrefix(name, "/srv") { - rawDevice.Type = pvcMetadataTypeDevice - } else if strings.HasPrefix(name, "/wal") { - rawDevice.Type = pvcWalTypeDevice - } - - return rawDevice, nil -} - -// Provision provisions an OSD -func Provision(context *clusterd.Context, agent *OsdAgent, crushLocation, topologyAffinity string) error { - if agent.pvcBacked { - // Init KMS store, retrieve the KEK and store it as an env var for ceph-volume - err := setKEKinEnv(context, agent.clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to set kek as an environment variable") - } - } - - // Print dmsetup version - err := dmsetupVersion(context) - if err != nil { - return errors.Wrap(err, "failed to print device mapper version") - } - - // set the initial orchestration status - status := oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating} - oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status) - - if err := client.WriteCephConfig(context, agent.clusterInfo); err != nil { - return errors.Wrap(err, "failed to generate ceph config") - } - - logger.Infof("discovering hardware") - - var rawDevices []*sys.LocalDisk - if agent.pvcBacked { - for i := range agent.devices { - rawDevice, err := configRawDevice(agent.devices[i].Name, context) - if err != nil { - return err - } - - rawDevices = append(rawDevices, rawDevice) - } - } else { - // We still need to use 'lsblk' as the underlying way to discover devices - // Ideally, we would use the "ceph-volume inventory" command instead - // However, it suffers from some limitation such as exposing available partitions and LVs - // See: https://tracker.ceph.com/issues/43579 - rawDevices, err = clusterd.DiscoverDevices(context.Executor) - if err != nil { - return errors.Wrap(err, "failed initial hardware discovery") - } - } - - context.Devices = rawDevices - - logger.Info("creating and starting the osds") - - // determine the set of devices that can/should be used for OSDs. 
- devices, err := getAvailableDevices(context, agent) - if err != nil { - return errors.Wrap(err, "failed to get available devices") - } - - // orchestration is about to start, update the status - status = oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating, PvcBackedOSD: agent.pvcBacked} - oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status) - - // start the desired OSDs on devices - logger.Infof("configuring osd devices: %+v", devices) - - deviceOSDs, err := agent.configureCVDevices(context, devices) - if err != nil { - return errors.Wrap(err, "failed to configure devices") - } - - // Let's fail if no OSDs were configured - // This likely means the filter for available devices passed (in PVC case) - // but the resulting device was already configured for another cluster (disk not wiped and leftover) - // So we need to make sure the list is filled up, otherwise fail - if len(deviceOSDs) == 0 { - logger.Warningf("skipping OSD configuration as no devices matched the storage settings for this node %q", agent.nodeName) - status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked} - oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status) - return nil - } - - // Populate CRUSH location for each OSD on the host - for i := range deviceOSDs { - deviceOSDs[i].Location = crushLocation - deviceOSDs[i].TopologyAffinity = topologyAffinity - } - - logger.Infof("devices = %+v", deviceOSDs) - - // Since we are done configuring the PVC we need to release it from LVM - // If we don't do this, the device will remain hold by LVM and we won't be able to detach it - // When running on PVC, the device is: - // * attached on the prepare pod - // * osd is mkfs - // * detached from the prepare pod - // * attached to the activate pod - // * then the OSD runs - if agent.pvcBacked && !deviceOSDs[0].SkipLVRelease && !deviceOSDs[0].LVBackedPV { - // Try to discover the VG of that LV - volumeGroupName := getVolumeGroupName(deviceOSDs[0].BlockPath) - - // If empty the osd is using the ceph-volume raw mode - // so it's consumming a raw block device and LVM is not used - // so there is nothing to de-activate - if volumeGroupName != "" { - if err := releaseLVMDevice(context, volumeGroupName); err != nil { - return errors.Wrap(err, "failed to release device from lvm") - } - } else { - // TODO - // don't assume this and run a bluestore check on the device to be sure? 
- logger.Infof("ceph-volume raw mode used by block %q, no VG to de-activate", deviceOSDs[0].BlockPath) - } - } - - // orchestration is completed, update the status - status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked} - oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status) - - return nil -} - -func getAvailableDevices(context *clusterd.Context, agent *OsdAgent) (*DeviceOsdMapping, error) { - desiredDevices := agent.devices - logger.Debugf("desiredDevices are %+v", desiredDevices) - - logger.Debug("context.Devices are:") - for _, disk := range context.Devices { - logger.Debugf("%+v", disk) - } - - available := &DeviceOsdMapping{Entries: map[string]*DeviceOsdIDEntry{}} - for _, device := range context.Devices { - // Ignore 'dm' device since they are not handled by c-v properly - // see: https://tracker.ceph.com/issues/43209 - if strings.HasPrefix(device.Name, sys.DeviceMapperPrefix) && device.Type == sys.LVMType { - logger.Infof("skipping 'dm' device %q", device.Name) - continue - } - - // Ignore device with filesystem signature since c-v inventory - // cannot detect that correctly - // see: https://tracker.ceph.com/issues/43585 - if device.Filesystem != "" { - // Allow further inspection of that device before skipping it - if device.Filesystem == "crypto_LUKS" && agent.pvcBacked { - if isCephEncryptedBlock(context, agent.clusterInfo.FSID, device.Name) { - logger.Infof("encrypted disk %q is an OSD part of this cluster, considering it", device.Name) - } - } else { - logger.Infof("skipping device %q because it contains a filesystem %q", device.Name, device.Filesystem) - continue - } - } - - if device.Type == sys.PartType { - // If we detect a partition we have to make sure that ceph-volume will be able to consume it - // ceph-volume version 14.2.8 has the right code to support partitions - if !agent.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawModeMinCephVersion) { - logger.Infof("skipping device %q because it is a partition and ceph version is too old, you need at least ceph %q", device.Name, cephVolumeRawModeMinCephVersion.String()) - continue - } - - device, err := clusterd.PopulateDeviceUdevInfo(device.Name, context.Executor, device) - if err != nil { - logger.Errorf("failed to get udev info of partition %q. %v", device.Name, err) - continue - } - } - - // Check if the desired device is available - // - // We need to use the /dev path, provided by the NAME property from "lsblk --paths", - // especially when running on PVC and/or on dm device - // When running on PVC we use the real device name instead of the Kubernetes mountpoint - // When running on dm device we use the dm device name like "/dev/mapper/foo" instead of "/dev/dm-1" - // Otherwise ceph-volume inventory will fail on the udevadm check - // udevadm does not support device path different than /dev or /sys - // - // So earlier lsblk extracted the '/dev' path, hence the device.Name property - // device.Name can be 'xvdca', later this is formatted to '/dev/xvdca' - var err error - var isAvailable bool - rejectedReason := "" - if agent.pvcBacked { - block := fmt.Sprintf("/mnt/%s", agent.nodeName) - rawOsds, err := GetCephVolumeRawOSDs(context, agent.clusterInfo, agent.clusterInfo.FSID, block, agent.metadataDevice, "", false, true) - if err != nil { - isAvailable = false - rejectedReason = fmt.Sprintf("failed to detect if there is already an osd. 
%v", err) - } else if len(rawOsds) > 0 { - isAvailable = false - rejectedReason = "already in use by a raw OSD, no need to reconfigure" - } else { - isAvailable = true - } - } else { - isAvailable, rejectedReason, err = sys.CheckIfDeviceAvailable(context.Executor, device.RealPath, agent.pvcBacked) - if err != nil { - isAvailable = false - rejectedReason = fmt.Sprintf("failed to check if the device %q is available. %v", device.Name, err) - } - } - - if !isAvailable { - logger.Infof("skipping device %q: %s.", device.Name, rejectedReason) - continue - } else { - logger.Infof("device %q is available.", device.Name) - } - - var deviceInfo *DeviceOsdIDEntry - if agent.metadataDevice != "" && agent.metadataDevice == device.Name { - // current device is desired as the metadata device - deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, Metadata: []int{}, DeviceInfo: device} - } else if len(desiredDevices) == 1 && desiredDevices[0].Name == "all" { - // user has specified all devices, use the current one for data - deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, DeviceInfo: device} - } else if len(desiredDevices) > 0 { - var matched bool - var matchedDevice DesiredDevice - for _, desiredDevice := range desiredDevices { - if desiredDevice.IsFilter { - // the desired devices is a regular expression - matched, err = regexp.Match(desiredDevice.Name, []byte(device.Name)) - if err != nil { - logger.Errorf("regex failed on device %q and filter %q. %v", device.Name, desiredDevice.Name, err) - continue - } - - if matched { - logger.Infof("device %q matches device filter %q", device.Name, desiredDevice.Name) - } - } else if desiredDevice.IsDevicePathFilter { - pathnames := append(strings.Fields(device.DevLinks), filepath.Join("/dev", device.Name)) - for _, pathname := range pathnames { - matched, err = regexp.Match(desiredDevice.Name, []byte(pathname)) - if err != nil { - logger.Errorf("regex failed on device %q and filter %q. 
%v", device.Name, desiredDevice.Name, err) - continue - } - - if matched { - logger.Infof("device %q (aliases: %q) matches device path filter %q", device.Name, device.DevLinks, desiredDevice.Name) - break - } - } - } else if device.Name == desiredDevice.Name { - logger.Infof("%q found in the desired devices", device.Name) - matched = true - } else if strings.HasPrefix(desiredDevice.Name, "/dev/") { - devLinks := strings.Split(device.DevLinks, " ") - for _, link := range devLinks { - if link == desiredDevice.Name { - logger.Infof("%q found in the desired devices (matched by link: %q)", device.Name, link) - matched = true - break - } - } - } - matchedDevice = desiredDevice - - if matchedDevice.DeviceClass == "" { - classNotSet := true - if agent.pvcBacked { - crushDeviceClass := os.Getenv(oposd.CrushDeviceClassVarName) - if crushDeviceClass != "" { - matchedDevice.DeviceClass = crushDeviceClass - classNotSet = false - } - } - if classNotSet { - matchedDevice.DeviceClass = sys.GetDiskDeviceClass(device) - } - } - - if matched { - break - } - } - - if err == nil && matched { - // the current device matches the user specifies filter/list, use it for data - logger.Infof("device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name) - deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), DeviceInfo: device} - - // set that this is not an OSD but a metadata device - if device.Type == pvcMetadataTypeDevice { - logger.Infof("metadata device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name) - deviceInfo = &DeviceOsdIDEntry{Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), Metadata: []int{1}, DeviceInfo: device} - } - - // set that this is not an OSD but a wal device - if device.Type == pvcWalTypeDevice { - logger.Infof("wal device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name) - deviceInfo = &DeviceOsdIDEntry{Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), Metadata: []int{2}, DeviceInfo: device} - } - } else { - logger.Infof("skipping device %q that does not match the device filter/list (%v). %v", device.Name, desiredDevices, err) - } - } else { - logger.Infof("skipping device %q until the admin specifies it can be used by an osd", device.Name) - } - - if deviceInfo != nil { - // When running on PVC, we typically have a single device only - // So it's fine to name the first entry of the map "data" instead of the PVC name - // It is particularly useful when a metadata PVC is used because we need to identify it in the map - // So the entry must be named "metadata" so it can accessed later - if agent.pvcBacked { - if device.Type == pvcDataTypeDevice { - available.Entries[pvcDataTypeDevice] = deviceInfo - } else if device.Type == pvcMetadataTypeDevice { - available.Entries[pvcMetadataTypeDevice] = deviceInfo - } else if device.Type == pvcWalTypeDevice { - available.Entries[pvcWalTypeDevice] = deviceInfo - } - } else { - available.Entries[device.Name] = deviceInfo - } - } - } - - return available, nil -} - -// releaseLVMDevice deactivates the LV to release the device. -func releaseLVMDevice(context *clusterd.Context, volumeGroupName string) error { - if op, err := context.Executor.ExecuteCommandWithCombinedOutput("lvchange", "-an", "-vv", volumeGroupName); err != nil { - return errors.Wrapf(err, "failed to deactivate LVM %s. 
output: %s", volumeGroupName, op) - } - logger.Info("successfully released device from lvm") - return nil -} - -// getVolumeGroupName returns the Volume group name from the given Logical Volume Path -func getVolumeGroupName(lvPath string) string { - vgSlice := strings.Split(lvPath, "/") - // Assert that lvpath is in correct format `/dev//` before extracting the vg name - if len(vgSlice) != 4 || vgSlice[2] == "" { - logger.Warningf("invalid LV Path: %q", lvPath) - return "" - } - - return vgSlice[2] -} diff --git a/pkg/daemon/ceph/osd/daemon_test.go b/pkg/daemon/ceph/osd/daemon_test.go deleted file mode 100644 index 12d7315c6..000000000 --- a/pkg/daemon/ceph/osd/daemon_test.go +++ /dev/null @@ -1,402 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package osd - -import ( - "strings" - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/rook/rook/pkg/util/sys" - "github.com/stretchr/testify/assert" -) - -const ( - udevFSOutput = ` -DEVNAME=/dev/sdk -DEVPATH=/devices/platform/host6/session2/target6:0:0/6:0:0:0/block/sdk -DEVTYPE=disk -ID_BUS=scsi -ID_FS_TYPE=ext2 -ID_FS_USAGE=filesystem -ID_FS_UUID=f2d38cba-37da-411d-b7ba-9a6696c58174 -ID_FS_UUID_ENC=f2d38cba-37da-411d-b7ba-9a6696c58174 -ID_FS_VERSION=1.0 -ID_MODEL=disk01 -ID_MODEL_ENC=disk01\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20 -ID_PATH=ip-127.0.0.1:3260-iscsi-iqn.2016-06.world.srv:storage.target01-lun-0 -ID_PATH_TAG=ip-127_0_0_1_3260-iscsi-iqn_2016-06_world_srv_storage_target01-lun-0 -ID_REVISION=4.0 -ID_SCSI=1 -ID_SCSI_SERIAL=d27e5d89-8829-468b-90ce-4ef8c02f07fe -ID_SERIAL=36001405d27e5d898829468b90ce4ef8c -ID_SERIAL_SHORT=6001405d27e5d898829468b90ce4ef8c -ID_TARGET_PORT=0 -ID_TYPE=disk -ID_VENDOR=LIO-ORG -ID_VENDOR_ENC=LIO-ORG\x20 -ID_WWN=0x6001405d27e5d898 -ID_WWN_VENDOR_EXTENSION=0x829468b90ce4ef8c -ID_WWN_WITH_EXTENSION=0x6001405d27e5d898829468b90ce4ef8c -MAJOR=8 -MINOR=160 -SUBSYSTEM=block -TAGS=:systemd: -USEC_INITIALIZED=15981915740802 -` - - udevPartOutput = ` -DEVNAME=/dev/sdt1 -DEVLINKS=/dev/disk/by-partlabel/test -DEVPATH=/devices/LNXSYSTM:00/LNXSYBUS:00/ACPI0004:00/VMBUS:00/763a35b7-6c97-461e-a494-c92c785255d0/host0/target0:0:0/0:0:0:0/block/sdt/sdt1 -DEVTYPE=partition -ID_BUS=scsi -ID_MODEL=Virtual_Disk -ID_MODEL_ENC=Virtual\x20Disk\x20\x20\x20\x20 -ID_PART_ENTRY_DISK=8:0 -ID_PART_ENTRY_NUMBER=2 -ID_PART_ENTRY_OFFSET=1050624 -ID_PART_ENTRY_SCHEME=gpt -ID_PART_ENTRY_SIZE=535818240 -ID_PART_ENTRY_TYPE=0fc63daf-8483-4772-8e79-3d69d8477de4 -ID_PART_ENTRY_UUID=ce8b0ba3-b2b6-48f8-8ffb-4231fef4a5b5 -ID_PART_TABLE_TYPE=gpt -ID_PART_TABLE_UUID=4180a289-da60-4d28-b951-91456d8848ed -ID_PATH=acpi-VMBUS:00-scsi-0:0:0:0 -ID_PATH_TAG=acpi-VMBUS_00-scsi-0_0_0_0 -ID_REVISION=1.0 -ID_SCSI=1 -ID_SERIAL=3600224807a025e35d9994b5f1d81cf8f -ID_SERIAL_SHORT=600224807a025e35d9994b5f1d81cf8f 
-ID_TYPE=disk -ID_VENDOR=Msft -ID_VENDOR_ENC=Msft\x20\x20\x20\x20 -ID_WWN=0x600224807a025e35 -ID_WWN_VENDOR_EXTENSION=0xd9994b5f1d81cf8f -ID_WWN_WITH_EXTENSION=0x600224807a025e35d9994b5f1d81cf8f -MAJOR=8 -MINOR=2 -PARTN=2 -SUBSYSTEM=block -TAGS=:systemd: -USEC_INITIALIZED=1128667 -` - - cvInventoryOutputAvailable = ` - { - "available":true, - "lvs":[ - - ], - "rejected_reasons":[ - "" - ], - "sys_api":{ - "size":10737418240.0, - "scheduler_mode":"mq-deadline", - "rotational":"0", - "vendor":"", - "human_readable_size":"10.00 GB", - "sectors":0, - "sas_device_handle":"", - "rev":"", - "sas_address":"", - "locked":0, - "sectorsize":"512", - "removable":"0", - "path":"/dev/sdb", - "support_discard":"0", - "model":"", - "ro":"0", - "nr_requests":"64", - "partitions":{ - - } - }, - "path":"/dev/sdb", - "device_id":"" - } - ` - - cvInventoryOutputNotAvailableBluestoreLabel = ` - { - "available":false, - "lvs":[ - - ], - "rejected_reasons":[ - "Has BlueStore device label" - ] - } - ` - - cvInventoryOutputNotAvailableLocked = ` - { - "available":false, - "lvs":[ - - ], - "rejected_reasons":[ - "locked" - ] - } - ` - - cvInventoryOutputNotAvailableSmall = ` - { - "available":false, - "lvs":[ - - ], - "rejected_reasons":[ - ["Insufficient space (<5GB)"] - ] - } - ` -) - -func TestAvailableDevices(t *testing.T) { - // set up a mock function to return "rook owned" partitions on the device and it does not have a filesystem - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("OUTPUT for %s %v", command, args) - - if command == "lsblk" { - if strings.Contains(args[3], "sdb") { - // /dev/sdb has a partition - return `NAME="sdb" SIZE="65" TYPE="disk" PKNAME="" -NAME="sdb1" SIZE="30" TYPE="part" PKNAME="sdb"`, nil - } else if strings.Contains(args[0], "vg1-lv") { - // /dev/mapper/vg1-lv* are LVs - return `TYPE="lvm"`, nil - } else if strings.Contains(args[0], "sdt1") { - return `TYPE="part"`, nil - } else if strings.HasPrefix(args[0], "/dev") { - return `TYPE="disk"`, nil - } - return "", nil - } else if command == "blkid" { - if strings.Contains(args[3], "sdb1") { - // partition sdb1 has a label MY-PART - return "MY-PART", nil - } - } else if command == "udevadm" { - if strings.Contains(args[2], "sdc") { - // /dev/sdc has a file system - return udevFSOutput, nil - } else if strings.Contains(args[2], "sdt1") { - return udevPartOutput, nil - } - - return "", nil - } else if command == "dmsetup" && args[0] == "info" { - if strings.Contains(args[5], "vg1-lv1") { - return "vg1-lv1", nil - } else if strings.Contains(args[5], "vg1-lv2") { - return "vg1-lv2", nil - } - } else if command == "dmsetup" && args[0] == "splitname" { - if strings.Contains(args[2], "vg1-lv1") { - return "vg1:lv1:", nil - } else if strings.Contains(args[2], "vg1-lv2") { - return "vg1:lv2:", nil - } - } else if command == "ceph-volume" { - if args[0] == "inventory" { - if strings.Contains(args[3], "/mnt/set1-0-data-qfhfk") { - return cvInventoryOutputNotAvailableBluestoreLabel, nil - } else if strings.Contains(args[3], "sdb") { - // sdb is locked - return cvInventoryOutputNotAvailableLocked, nil - } else if strings.Contains(args[3], "sdc") { - // sdc is too small - return cvInventoryOutputNotAvailableSmall, nil - } - - return cvInventoryOutputAvailable, nil - } - - } else if command == "stdbuf" { - if args[4] == "raw" && args[5] == "list" { - return cephVolumeRAWTestResult, nil - } else if command == "ceph-volume" && args[0] == "lvm" { - if args[4] == 
"vg1/lv2" { - return `{"0":[{"name":"lv2","type":"block"}]}`, nil - } - } - return "{}", nil - - } - - return "", errors.Errorf("unknown command %s %s", command, args) - }, - } - - context := &clusterd.Context{Executor: executor} - context.Devices = []*sys.LocalDisk{ - {Name: "sda", DevLinks: "/dev/disk/by-id/scsi-0123 /dev/disk/by-path/pci-0:1:2:3-scsi-1", RealPath: "/dev/sda"}, - {Name: "sdb", DevLinks: "/dev/disk/by-id/scsi-4567 /dev/disk/by-path/pci-4:5:6:7-scsi-1", RealPath: "/dev/sdb"}, - {Name: "sdc", DevLinks: "/dev/disk/by-id/scsi-89ab /dev/disk/by-path/pci-8:9:a:b-scsi-1", RealPath: "/dev/sdc"}, - {Name: "sdd", DevLinks: "/dev/disk/by-id/scsi-cdef /dev/disk/by-path/pci-c:d:e:f-scsi-1", RealPath: "/dev/sdd"}, - {Name: "sde", DevLinks: "/dev/disk/by-id/sde-0x0000 /dev/disk/by-path/pci-0000:00:18.0-ata-1", RealPath: "/dev/sde"}, - {Name: "nvme01", DevLinks: "/dev/disk/by-id/nvme-0246 /dev/disk/by-path/pci-0:2:4:6-nvme-1", RealPath: "/dev/nvme01"}, - {Name: "rda", RealPath: "/dev/rda"}, - {Name: "rdb", RealPath: "/dev/rdb"}, - {Name: "sdt1", RealPath: "/dev/sdt1", Type: sys.PartType}, - {Name: "sdv1", RealPath: "/dev/sdv1", Type: sys.PartType, Filesystem: "ext2"}, // has filesystem - } - - version := cephver.Octopus - - // select all devices, including nvme01 for metadata - pvcBackedOSD := false - agent := &OsdAgent{ - devices: []DesiredDevice{{Name: "all"}}, - metadataDevice: "nvme01", - pvcBacked: pvcBackedOSD, - clusterInfo: &cephclient.ClusterInfo{}, - } - agent.clusterInfo.CephVersion = version - mapping, err := getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 7, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["sda"].Data) - assert.Equal(t, -1, mapping.Entries["sdd"].Data) - assert.Equal(t, -1, mapping.Entries["sde"].Data) - assert.Equal(t, -1, mapping.Entries["rda"].Data) - assert.Equal(t, -1, mapping.Entries["rdb"].Data) - assert.Equal(t, -1, mapping.Entries["nvme01"].Data) - assert.NotNil(t, mapping.Entries["nvme01"].Metadata) - assert.Equal(t, 0, len(mapping.Entries["nvme01"].Metadata)) - assert.Equal(t, -1, mapping.Entries["sdt1"].Data) - assert.NotContains(t, mapping.Entries, "sdb") // sdb is in use (has a partition) - assert.NotContains(t, mapping.Entries, "sdc") // sdc is too small - assert.NotContains(t, mapping.Entries, "sdv1") // sdv1 has a filesystem - - // Partition is skipped - agent.clusterInfo.CephVersion = cephver.Nautilus - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 6, len(mapping.Entries)) - - // Do not skip partition anymore - agent.clusterInfo.CephVersion = cephver.Octopus - - // select no devices both using and not using a filter - agent.metadataDevice = "" - agent.devices = nil - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 0, len(mapping.Entries)) - - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 0, len(mapping.Entries)) - - // select the sd* devices - agent.devices = []DesiredDevice{{Name: "^sd.$", IsFilter: true}} - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 3, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["sda"].Data) - assert.Equal(t, -1, mapping.Entries["sdd"].Data) - - // select an exact device - agent.devices = []DesiredDevice{{Name: "sdd"}} - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 1, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["sdd"].Data) - - // 
select all devices except those that have a prefix of "s" - agent.devices = []DesiredDevice{{Name: "^[^s]", IsFilter: true}} - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 3, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["rda"].Data) - assert.Equal(t, -1, mapping.Entries["rdb"].Data) - assert.Equal(t, -1, mapping.Entries["nvme01"].Data) - - // select the sd* devices by devicePathFilter - agent.devices = []DesiredDevice{{Name: "^/dev/sd.$", IsDevicePathFilter: true}} - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 3, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["sda"].Data) - assert.Equal(t, -1, mapping.Entries["sdd"].Data) - - // select the devices that have udev persistent names by devicePathFilter - agent.devices = []DesiredDevice{{Name: "^/dev/disk/by-path/.*-scsi-.*", IsDevicePathFilter: true}} - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 2, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["sda"].Data) - assert.Equal(t, -1, mapping.Entries["sdd"].Data) - agent.devices = []DesiredDevice{{Name: "^/dev/disk/by-partlabel/te.*", IsDevicePathFilter: true}} - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 1, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["sdt1"].Data) - - // select a device by explicit link - agent.devices = []DesiredDevice{{Name: "/dev/disk/by-id/sde-0x0000"}} - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 1, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["sde"].Data) - agent.devices = []DesiredDevice{{Name: "/dev/disk/by-partlabel/test"}} - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 1, len(mapping.Entries)) - assert.Equal(t, -1, mapping.Entries["sdt1"].Data) - - // test on PVC - context.Devices = []*sys.LocalDisk{ - {Name: "/mnt/set1-0-data-qfhfk", RealPath: "/dev/xvdcy", Type: "data"}, - } - agent.devices = []DesiredDevice{{Name: "all"}} - agent.pvcBacked = true - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 1, len(mapping.Entries), mapping) - - // on PVC, backed by LV, available - context.Devices = []*sys.LocalDisk{ - {Name: "/mnt/set1-0-data-wjkla", RealPath: "/dev/mapper/vg1-lv1", Type: "data"}, - } - mapping, err = getAvailableDevices(context, agent) - assert.Nil(t, err) - assert.Equal(t, 1, len(mapping.Entries), mapping) -} - -func TestGetVolumeGroupName(t *testing.T) { - validLVPath := "/dev/vgName1/lvName2" - invalidLVPath1 := "/dev//vgName2" - invalidLVPath2 := "/dev/" - - vgName := getVolumeGroupName(validLVPath) - assert.Equal(t, vgName, "vgName1") - - vgName = getVolumeGroupName(invalidLVPath1) - assert.Equal(t, vgName, "") - - vgName = getVolumeGroupName(invalidLVPath2) - assert.Equal(t, vgName, "") -} diff --git a/pkg/daemon/ceph/osd/device.go b/pkg/daemon/ceph/osd/device.go deleted file mode 100644 index 152666502..000000000 --- a/pkg/daemon/ceph/osd/device.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "encoding/json" - - "github.com/rook/rook/pkg/util/sys" -) - -const ( - bootstrapOSDKeyringTemplate = ` -[client.bootstrap-osd] - key = %s - caps mon = "allow profile bootstrap-osd" -` -) - -// Device is a device -type Device struct { - Name string `json:"name"` - NodeID string `json:"nodeId"` - Dir bool `json:"bool"` -} - -// DesiredDevice keeps track of the desired settings for a device -type DesiredDevice struct { - Name string - OSDsPerDevice int - MetadataDevice string - DatabaseSizeMB int - DeviceClass string - InitialWeight string - IsFilter bool - IsDevicePathFilter bool -} - -// DeviceOsdMapping represents the mapping of an OSD on disk -type DeviceOsdMapping struct { - Entries map[string]*DeviceOsdIDEntry // device name to OSD ID mapping entry -} - -// DeviceOsdIDEntry represents the details of an OSD -type DeviceOsdIDEntry struct { - Data int // OSD ID that has data stored here - Metadata []int // OSD IDs (multiple) that have metadata stored here - Config DesiredDevice // Device specific config options - PersistentDevicePaths []string - DeviceInfo *sys.LocalDisk // low-level info about the device -} - -func (m *DeviceOsdMapping) String() string { - b, _ := json.Marshal(m) - return string(b) -} diff --git a/pkg/daemon/ceph/osd/device_test.go b/pkg/daemon/ceph/osd/device_test.go deleted file mode 100644 index 3b9fc8f04..000000000 --- a/pkg/daemon/ceph/osd/device_test.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package osd - -import ( - "io/ioutil" - "os" - "path" - "strings" - "testing" - - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestOSDBootstrap(t *testing.T) { - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - return "{\"key\":\"mysecurekey\"}", nil - }, - } - - context := &clusterd.Context{Executor: executor, ConfigDir: configDir} - defer os.RemoveAll(context.ConfigDir) - err := createOSDBootstrapKeyring(context, &client.ClusterInfo{Namespace: "name"}, configDir) - assert.Nil(t, err) - - targetPath := path.Join(configDir, bootstrapOsdKeyring) - contents, err := ioutil.ReadFile(targetPath) - assert.Nil(t, err) - assert.NotEqual(t, -1, strings.Index(string(contents), "[client.bootstrap-osd]")) - assert.NotEqual(t, -1, strings.Index(string(contents), "key = mysecurekey")) - assert.NotEqual(t, -1, strings.Index(string(contents), "caps mon = \"allow profile bootstrap-osd\"")) -} diff --git a/pkg/daemon/ceph/osd/encryption.go b/pkg/daemon/ceph/osd/encryption.go deleted file mode 100644 index 5c5cd4abf..000000000 --- a/pkg/daemon/ceph/osd/encryption.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "fmt" - "os" - "regexp" - "strings" - - "github.com/pkg/errors" - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" -) - -const ( - cryptsetupBinary = "cryptsetup" - dmsetupBinary = "dmsetup" -) - -var ( - luksLabelCephFSID = regexp.MustCompile("ceph_fsid=(.*)") -) - -func closeEncryptedDevice(context *clusterd.Context, dmName string) error { - args := []string{"--verbose", "luksClose", dmName} - cryptsetupOut, err := context.Executor.ExecuteCommandWithCombinedOutput(cryptsetupBinary, args...) - if err != nil { - return errors.Wrapf(err, "failed to close encrypted device. %s", cryptsetupOut) - } - - logger.Infof("dm version:\n%s", cryptsetupOut) - return nil -} - -func dmsetupVersion(context *clusterd.Context) error { - args := []string{"version"} - dmsetupOut, err := context.Executor.ExecuteCommandWithCombinedOutput(dmsetupBinary, args...) - if err != nil { - return errors.Wrapf(err, "failed to find device mapper version. 
%s", dmsetupOut) - } - - logger.Info(dmsetupOut) - return nil -} - -func setKEKinEnv(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo) error { - // KMS details are passed by the Operator as env variables in the pod - // The token if any is mounted in the provisioner pod as an env variable so the secrets lib will pick it up - kmsConfig := kms.NewConfig(context, &v1.ClusterSpec{Security: v1.SecuritySpec{KeyManagementService: v1.KeyManagementServiceSpec{ConnectionDetails: kms.ConfigEnvsToMapString()}}}, clusterInfo) - if kmsConfig.IsVault() { - // Fetch the KEK - kek, err := kmsConfig.GetSecret(os.Getenv(oposd.PVCNameEnvVarName)) - if err != nil { - return errors.Wrapf(err, "failed to retrieve key encryption key from %q kms", kmsConfig.Provider) - } - - // Set the KEK as an env variable for ceph-volume - err = os.Setenv(oposd.CephVolumeEncryptedKeyEnvVarName, kek) - if err != nil { - return errors.Wrap(err, "failed to set key encryption key env variable for ceph-volume") - } - } - - return nil -} - -func setLUKSLabelAndSubsystem(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, disk string) error { - // The PVC info is a nice to have - pvcName := os.Getenv(oposd.PVCNameEnvVarName) - if pvcName == "" { - return errors.Errorf("failed to find %q environment variable", oposd.PVCNameEnvVarName) - } - subsystem := fmt.Sprintf("ceph_fsid=%s", clusterInfo.FSID) - label := fmt.Sprintf("pvc_name=%s", pvcName) - - logger.Infof("setting LUKS subsystem to %q and label to %q to disk %q", subsystem, label, disk) - args := []string{"config", disk, "--subsystem", subsystem, "--label", label} - output, err := context.Executor.ExecuteCommandWithCombinedOutput(cryptsetupBinary, args...) - if err != nil { - return errors.Wrapf(err, "failed to set subsystem %q and label %q to encrypted device %q. is your distro built with LUKS1 as a default?. %s", subsystem, label, disk, output) - } - - logger.Infof("successfully set LUKS subsystem to %q and label to %q to disk %q", subsystem, label, disk) - return nil -} - -func dumpLUKS(context *clusterd.Context, disk string) (string, error) { - args := []string{"luksDump", disk} - cryptsetupOut, err := context.Executor.ExecuteCommandWithCombinedOutput(cryptsetupBinary, args...) - if err != nil { - return "", errors.Wrapf(err, "failed to dump LUKS header for disk %q. %s", disk, cryptsetupOut) - } - - return cryptsetupOut, nil -} - -func isCephEncryptedBlock(context *clusterd.Context, currentClusterFSID string, disk string) bool { - metadata, err := dumpLUKS(context, disk) - if err != nil { - logger.Errorf("failed to determine if the encrypted block %q is from our cluster. %v", disk, err) - return false - } - - // Now we parse the CLI output - // JSON output is only available with cryptsetup 2.4.x - https://gitlab.com/cryptsetup/cryptsetup/-/issues/511 - ceph_fsid := luksLabelCephFSID.FindString(metadata) - if ceph_fsid == "" { - logger.Error("failed to find ceph_fsid in the LUKS header, the encrypted disk is not from a ceph cluster") - return false - } - - // is it an OSD from our cluster? 
- currentDiskCephFSID := strings.SplitAfter(ceph_fsid, "=")[1] - if currentDiskCephFSID != currentClusterFSID { - logger.Errorf("encrypted disk %q is part of a different ceph cluster %q", disk, currentDiskCephFSID) - return false - } - - return true - -} diff --git a/pkg/daemon/ceph/osd/encryption_test.go b/pkg/daemon/ceph/osd/encryption_test.go deleted file mode 100644 index 359d13085..000000000 --- a/pkg/daemon/ceph/osd/encryption_test.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -var ( - luksDump = `LUKS header information -Version: 2 -Epoch: 13 -Metadata area: 12288 bytes -UUID: a97525ee-7c30-4f70-89ac-e56d48907cc5 -Label: pvc_name=set1-data-0lmdjp -Subsystem: ceph_fsid=811e7dc0-ea13-4951-b000-24a8565d0735 -Flags: (no flags) - -Data segments: - 0: crypt - offset: 2097152 [bytes] - length: (whole device) - cipher: aes-xts-plain64 - sector: 512 [bytes] - -Keyslots: - 0: luks2 - Key: 256 bits - Priority: normal - Cipher: aes-xts-plain64 - PBKDF: pbkdf2 - Hash: sha256 - Iterations: 583190 - Salt: 4f 9d 0d 0b 83 41 2f 47 b4 1f 6b 35 df 89 e0 33 - c8 bd 27 60 22 a5 f5 02 62 94 a9 92 12 2a 4f c0 - AF stripes: 4000 - Area offset:32768 [bytes] - Area length:131072 [bytes] - Digest ID: 0 -Tokens: -Digests: - 0: pbkdf2 - Hash: sha256 - Iterations: 36127 - Salt: db 98 33 3a d4 15 b6 6c 48 63 6d 7b 33 b0 7e cd - ef 90 8d 81 46 37 78 b4 82 37 3b 84 e8 e7 d8 1b - Digest: 6d 86 96 05 99 4f a9 48 87 54 - 5c ef 4b 99 3b 9d fa 0b 8f 8a` -) - -func TestCloseEncryptedDevice(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if command == "cryptsetup" && args[0] == "--verbose" && args[1] == "luksClose" { - return "success", nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor} - err := closeEncryptedDevice(context, "/dev/mapper/ceph-43e9efed-0676-4731-b75a-a4c42ece1bb1-xvdbr-block-dmcrypt") - assert.NoError(t, err) -} - -func TestDmsetupVersion(t *testing.T) { - dmsetupOutput := ` -Library version: 1.02.154 (2018-12-07) -Driver version: 4.40.0 -` - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if command == "dmsetup" && args[0] == "version" { - return dmsetupOutput, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor} - err := dmsetupVersion(context) - assert.NoError(t, err) -} - -func TestIsCephEncryptedBlock(t *testing.T) { - executor := &exectest.MockExecutor{} - 
executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if command == cryptsetupBinary && args[0] == "luksDump" { - return luksDump, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - context := &clusterd.Context{Executor: executor} - - t.Run("different fsid", func(t *testing.T) { - isCephEncryptedBlock := isCephEncryptedBlock(context, "foo", "/dev/sda1") - assert.False(t, isCephEncryptedBlock) - }) - t.Run("same cluster", func(t *testing.T) { - isCephEncryptedBlock := isCephEncryptedBlock(context, "811e7dc0-ea13-4951-b000-24a8565d0735", "/dev/sda1") - assert.True(t, isCephEncryptedBlock) - }) -} diff --git a/pkg/daemon/ceph/osd/init.go b/pkg/daemon/ceph/osd/init.go deleted file mode 100644 index 433947809..000000000 --- a/pkg/daemon/ceph/osd/init.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "fmt" - - "path" - - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" -) - -const ( - bootstrapOsdKeyring = "bootstrap-osd/ceph.keyring" -) - -// create a keyring for the bootstrap-osd client, it gets a limited set of privileges -func createOSDBootstrapKeyring(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, rootDir string) error { - username := "client.bootstrap-osd" - keyringPath := path.Join(rootDir, bootstrapOsdKeyring) - access := []string{"mon", "allow profile bootstrap-osd"} - keyringEval := func(key string) string { - return fmt.Sprintf(bootstrapOSDKeyringTemplate, key) - } - - return cephclient.CreateKeyring(context, clusterInfo, username, keyringPath, access, keyringEval) -} diff --git a/pkg/daemon/ceph/osd/kms/envs.go b/pkg/daemon/ceph/osd/kms/envs.go deleted file mode 100644 index 4c061bcc2..000000000 --- a/pkg/daemon/ceph/osd/kms/envs.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kms - -import ( - "os" - "path" - "sort" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/libopenstorage/secrets/vault" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/daemon/ceph/client" - v1 "k8s.io/api/core/v1" -) - -var ( - knownKMSPrefix = []string{"VAULT_"} -) - -// VaultTokenEnvVarFromSecret returns the kms token secret value as an env var -func vaultTokenEnvVarFromSecret(tokenSecretName string) v1.EnvVar { - return v1.EnvVar{ - Name: api.EnvVaultToken, - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: tokenSecretName, - }, - Key: KMSTokenSecretNameKey, - }, - }, - } -} - -// vaultTLSEnvVarFromSecret translates TLS env var which are set to k8s secret name to their actual path on the fs once mounted as volume -// See: TLSSecretVolumeAndMount() for more details -func vaultTLSEnvVarFromSecret(kmsConfig map[string]string) []v1.EnvVar { - vaultTLSEnvVar := []v1.EnvVar{} - - for _, tlsOption := range cephv1.VaultTLSConnectionDetails { - tlsSecretName := GetParam(kmsConfig, tlsOption) - if tlsSecretName != "" { - vaultTLSEnvVar = append(vaultTLSEnvVar, v1.EnvVar{Name: tlsOption, Value: path.Join(EtcVaultDir, tlsSecretPath(tlsOption))}) - } - } - - return vaultTLSEnvVar -} - -// VaultConfigToEnvVar populates the kms config as env variables -func VaultConfigToEnvVar(spec cephv1.ClusterSpec) []v1.EnvVar { - envs := []v1.EnvVar{} - backendPath := GetParam(spec.Security.KeyManagementService.ConnectionDetails, vault.VaultBackendPathKey) - // Set BACKEND_PATH to the API's default if not passed - if backendPath == "" { - spec.Security.KeyManagementService.ConnectionDetails[vault.VaultBackendPathKey] = vault.DefaultBackendPath - } - for k, v := range spec.Security.KeyManagementService.ConnectionDetails { - // Skip TLS and token env var to avoid env being set multiple times - toSkip := append(cephv1.VaultTLSConnectionDetails, api.EnvVaultToken) - if client.StringInSlice(k, toSkip) { - continue - } - envs = append(envs, v1.EnvVar{Name: k, Value: v}) - } - - // Add the VAULT_TOKEN - envs = append(envs, vaultTokenEnvVarFromSecret(spec.Security.KeyManagementService.TokenSecretName)) - - // Add TLS env if any - envs = append(envs, vaultTLSEnvVarFromSecret(spec.Security.KeyManagementService.ConnectionDetails)...) - - logger.Debugf("kms envs are %v", envs) - - // Sort env vars since the input is a map which by nature is unsorted... - return sortV1EnvVar(envs) -} - -// ConfigEnvsToMapString returns all the env variables in map from a known KMS -func ConfigEnvsToMapString() map[string]string { - envs := make(map[string]string) - for _, e := range os.Environ() { - pair := strings.SplitN(e, "=", 2) - for _, knownKMS := range knownKMSPrefix { - if strings.HasPrefix(pair[0], knownKMS) || pair[0] == Provider { - envs[pair[0]] = os.Getenv(pair[0]) - } - } - } - - return envs -} - -// sortV1EnvVar sorts a list of v1.EnvVar -func sortV1EnvVar(envs []v1.EnvVar) []v1.EnvVar { - sort.SliceStable(envs, func(i, j int) bool { - return envs[i].Name < envs[j].Name - }) - - return envs -} diff --git a/pkg/daemon/ceph/osd/kms/envs_test.go b/pkg/daemon/ceph/osd/kms/envs_test.go deleted file mode 100644 index 67aba932a..000000000 --- a/pkg/daemon/ceph/osd/kms/envs_test.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kms - -import ( - "os" - "sort" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" -) - -func TestVaultTLSEnvVarFromSecret(t *testing.T) { - // No TLS - spec := cephv1.ClusterSpec{Security: cephv1.SecuritySpec{KeyManagementService: cephv1.KeyManagementServiceSpec{TokenSecretName: "vault-token", ConnectionDetails: map[string]string{"KMS_PROVIDER": "vault", "VAULT_ADDR": "http://1.1.1.1:8200"}}}} - envVars := VaultConfigToEnvVar(spec) - areEnvVarsSorted := sort.SliceIsSorted(envVars, func(i, j int) bool { - return envVars[i].Name < envVars[j].Name - }) - assert.True(t, areEnvVarsSorted) - assert.Equal(t, 4, len(envVars)) - assert.Contains(t, envVars, v1.EnvVar{Name: "KMS_PROVIDER", Value: "vault"}) - assert.Contains(t, envVars, v1.EnvVar{Name: "VAULT_ADDR", Value: "http://1.1.1.1:8200"}) - assert.Contains(t, envVars, v1.EnvVar{Name: "VAULT_TOKEN", ValueFrom: &v1.EnvVarSource{SecretKeyRef: &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{Name: "vault-token"}, Key: "token"}}}) - assert.Contains(t, envVars, v1.EnvVar{Name: "VAULT_BACKEND_PATH", Value: "secret/"}) - - // TLS - spec = cephv1.ClusterSpec{Security: cephv1.SecuritySpec{KeyManagementService: cephv1.KeyManagementServiceSpec{TokenSecretName: "vault-token", ConnectionDetails: map[string]string{"KMS_PROVIDER": "vault", "VAULT_ADDR": "http://1.1.1.1:8200", "VAULT_CACERT": "vault-ca-cert-secret"}}}} - envVars = VaultConfigToEnvVar(spec) - areEnvVarsSorted = sort.SliceIsSorted(envVars, func(i, j int) bool { - return envVars[i].Name < envVars[j].Name - }) - assert.True(t, areEnvVarsSorted) - assert.Equal(t, 5, len(envVars)) - assert.Contains(t, envVars, v1.EnvVar{Name: "KMS_PROVIDER", Value: "vault"}) - assert.Contains(t, envVars, v1.EnvVar{Name: "VAULT_ADDR", Value: "http://1.1.1.1:8200"}) - assert.Contains(t, envVars, v1.EnvVar{Name: "VAULT_CACERT", Value: "/etc/vault/vault.ca"}) - assert.Contains(t, envVars, v1.EnvVar{Name: "VAULT_TOKEN", ValueFrom: &v1.EnvVarSource{SecretKeyRef: &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{Name: "vault-token"}, Key: "token"}}}) - -} - -func TestConfigEnvsToMapString(t *testing.T) { - // No VAULT envs - envs := ConfigEnvsToMapString() - assert.Equal(t, 0, len(envs)) - - // Single KMS value - os.Setenv("KMS_PROVIDER", "vault") - defer os.Unsetenv("KMS_PROVIDER") - envs = ConfigEnvsToMapString() - assert.Equal(t, 1, len(envs)) - - // Some more Vault KMS with one intruder - os.Setenv("KMS_PROVIDER", "vault") - defer os.Unsetenv("KMS_PROVIDER") - os.Setenv("VAULT_ADDR", "1.1.1.1") - defer os.Unsetenv("VAULT_ADDR") - os.Setenv("VAULT_SKIP_VERIFY", "true") - defer os.Unsetenv("VAULT_SKIP_VERIFY") - os.Setenv("foo", "bar") - defer os.Unsetenv("foo") - envs = ConfigEnvsToMapString() - assert.Equal(t, 3, len(envs)) -} diff --git a/pkg/daemon/ceph/osd/kms/k8s.go b/pkg/daemon/ceph/osd/kms/k8s.go deleted file mode 100644 index d1e693314..000000000 --- 
a/pkg/daemon/ceph/osd/kms/k8s.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kms - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // OsdEncryptionSecretNameKeyName is the key name of the Secret that contains the OSD encryption key - // #nosec G101 since this is not leaking any hardcoded credentials, it's just the secret key name - OsdEncryptionSecretNameKeyName = "dmcrypt-key" - - // #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name - osdEncryptionSecretNamePrefix = "rook-ceph-osd-encryption-key" - - // KMSTokenSecretNameKey is the key name of the Secret that contains the KMS authentication token - KMSTokenSecretNameKey = "token" -) - -// storeSecretInKubernetes stores the dmcrypt key in a Kubernetes Secret -func (c *Config) storeSecretInKubernetes(pvcName, key string) error { - ctx := context.TODO() - s, err := generateOSDEncryptedKeySecret(pvcName, key, c.clusterInfo) - if err != nil { - return err - } - - // Create the Kubernetes Secret - _, err = c.context.Clientset.CoreV1().Secrets(c.clusterInfo.Namespace).Create(ctx, s, metav1.CreateOptions{}) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to save ceph osd encryption key as a secret for pvc %q", pvcName) - } - - return nil -} - -func generateOSDEncryptedKeySecret(pvcName, key string, clusterInfo *cephclient.ClusterInfo) (*v1.Secret, error) { - s := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: GenerateOSDEncryptionSecretName(pvcName), - Namespace: clusterInfo.Namespace, - Labels: map[string]string{ - "pvc_name": pvcName, - }, - }, - StringData: map[string]string{ - OsdEncryptionSecretNameKeyName: key, - }, - Type: k8sutil.RookType, - } - - // Set the ownerref to the Secret - err := clusterInfo.OwnerInfo.SetControllerReference(s) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to osd encryption key secret %q", s.Name) - } - - return s, nil -} - -// GenerateOSDEncryptionSecretName generate the Kubernetes Secret name of the encrypted key -func GenerateOSDEncryptionSecretName(pvcName string) string { - return fmt.Sprintf("%s-%s", osdEncryptionSecretNamePrefix, pvcName) -} - -// IsK8s determines whether the configured KMS is Kubernetes -func (c *Config) IsK8s() bool { - return c.Provider == "kubernetes" || c.Provider == "k8s" -} diff --git a/pkg/daemon/ceph/osd/kms/k8s_test.go b/pkg/daemon/ceph/osd/kms/k8s_test.go deleted file mode 100644 index a8fac343a..000000000 --- a/pkg/daemon/ceph/osd/kms/k8s_test.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kms - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGenerateOSDEncryptionSecretName(t *testing.T) { - assert.Equal(t, "rook-ceph-osd-encryption-key-set1-data-0-7dwll", GenerateOSDEncryptionSecretName("set1-data-0-7dwll")) -} diff --git a/pkg/daemon/ceph/osd/kms/kms.go b/pkg/daemon/ceph/osd/kms/kms.go deleted file mode 100644 index 2531ba637..000000000 --- a/pkg/daemon/ceph/osd/kms/kms.go +++ /dev/null @@ -1,246 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kms - -import ( - "context" - "os" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/hashicorp/vault/api" - "github.com/libopenstorage/secrets" - "github.com/libopenstorage/secrets/vault" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // Provider is the config name for the KMS provider type - Provider = "KMS_PROVIDER" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-kms") - kmsMandatoryConnectionDetails = []string{Provider} -) - -// Config is the generic configuration for the KMS -type Config struct { - Provider string - context *clusterd.Context - clusterSpec *cephv1.ClusterSpec - clusterInfo *cephclient.ClusterInfo -} - -// NewConfig returns the selected KMS -func NewConfig(context *clusterd.Context, clusterSpec *cephv1.ClusterSpec, clusterInfo *cephclient.ClusterInfo) *Config { - config := &Config{ - context: context, - clusterInfo: clusterInfo, - clusterSpec: clusterSpec, - } - - Provider := clusterSpec.Security.KeyManagementService.ConnectionDetails[Provider] - switch Provider { - case "": - config.Provider = secrets.TypeK8s - case secrets.TypeVault: - config.Provider = secrets.TypeVault - default: - logger.Errorf("unsupported kms type %q", Provider) - } - - return config -} - -// PutSecret writes an encrypted key in a KMS -func (c *Config) PutSecret(secretName, secretValue string) error { - // If Kubernetes Secret KMS is selected (default) - if c.IsK8s() { - // Store the secret in Kubernetes Secrets - err := c.storeSecretInKubernetes(secretName, secretValue) - if err != nil { - return errors.Wrap(err, "failed to store secret in kubernetes secret") - } - } - if c.IsVault() { - // Store the secret in Vault - v, err := InitVault(c.context, c.clusterInfo.Namespace, 
c.clusterSpec.Security.KeyManagementService.ConnectionDetails) - if err != nil { - return errors.Wrap(err, "failed to init vault kms") - } - k := buildKeyContext(c.clusterSpec.Security.KeyManagementService.ConnectionDetails) - err = put(v, GenerateOSDEncryptionSecretName(secretName), secretValue, k) - if err != nil { - return errors.Wrap(err, "failed to put secret in vault") - } - } - - return nil -} - -// GetSecret returns an encrypted key from a KMS -func (c *Config) GetSecret(secretName string) (string, error) { - var value string - if c.IsVault() { - // Store the secret in Vault - v, err := InitVault(c.context, c.clusterInfo.Namespace, c.clusterSpec.Security.KeyManagementService.ConnectionDetails) - if err != nil { - return "", errors.Wrap(err, "failed to get secret in vault") - } - - k := buildKeyContext(c.clusterSpec.Security.KeyManagementService.ConnectionDetails) - value, err = get(v, GenerateOSDEncryptionSecretName(secretName), k) - if err != nil { - return "", errors.Wrap(err, "failed to get secret in vault") - } - } - - return value, nil -} - -// DeleteSecret deletes an encrypted key from a KMS -func (c *Config) DeleteSecret(secretName string) error { - if c.IsVault() { - // Store the secret in Vault - v, err := InitVault(c.context, c.clusterInfo.Namespace, c.clusterSpec.Security.KeyManagementService.ConnectionDetails) - if err != nil { - return errors.Wrap(err, "failed to delete secret in vault") - } - - k := buildKeyContext(c.clusterSpec.Security.KeyManagementService.ConnectionDetails) - - // Force removal of all the versions of the secret on K/V version 2 - k[secrets.DestroySecret] = "true" - - err = delete(v, GenerateOSDEncryptionSecretName(secretName), k) - if err != nil { - return errors.Wrap(err, "failed to delete secret in vault") - } - } - - return nil -} - -// GetParam returns the value of the KMS config option -func GetParam(kmsConfig map[string]string, param string) string { - if val, ok := kmsConfig[param]; ok && val != "" { - return strings.TrimSpace(val) - } - return "" -} - -// ValidateConnectionDetails validates mandatory KMS connection details -func ValidateConnectionDetails(clusterdContext *clusterd.Context, securitySpec cephv1.SecuritySpec, ns string) error { - ctx := context.TODO() - // A token must be specified - if !securitySpec.KeyManagementService.IsTokenAuthEnabled() { - return errors.New("failed to validate kms configuration (missing token in spec)") - } - - // KMS provider must be specified - provider := GetParam(securitySpec.KeyManagementService.ConnectionDetails, Provider) - - // Validate potential token Secret presence - if securitySpec.KeyManagementService.IsTokenAuthEnabled() { - kmsToken, err := clusterdContext.Clientset.CoreV1().Secrets(ns).Get(ctx, securitySpec.KeyManagementService.TokenSecretName, metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to fetch kms token secret %q", securitySpec.KeyManagementService.TokenSecretName) - } - - // Check for empty token - token, ok := kmsToken.Data[KMSTokenSecretNameKey] - if !ok || len(token) == 0 { - return errors.Errorf("failed to read k8s kms secret %q key %q (not found or empty)", KMSTokenSecretNameKey, securitySpec.KeyManagementService.TokenSecretName) - } - - switch provider { - case "vault": - // Set the env variable - err = os.Setenv(api.EnvVaultToken, string(token)) - if err != nil { - return errors.Wrap(err, "failed to set vault kms token to an env var") - } - } - } - - // Lookup mandatory connection details - for _, config := range kmsMandatoryConnectionDetails { - if 
GetParam(securitySpec.KeyManagementService.ConnectionDetails, config) == "" { - return errors.Errorf("failed to validate kms config %q. cannot be empty", config) - } - } - - // Validate KMS provider connection details - switch provider { - case "vault": - err := validateVaultConnectionDetails(clusterdContext, ns, securitySpec.KeyManagementService.ConnectionDetails) - if err != nil { - return errors.Wrap(err, "failed to validate vault connection details") - } - - secretEngine := securitySpec.KeyManagementService.ConnectionDetails[VaultSecretEngineKey] - switch secretEngine { - case VaultKVSecretEngineKey: - // Append Backend Version if not already present - if GetParam(securitySpec.KeyManagementService.ConnectionDetails, vault.VaultBackendKey) == "" { - backendVersion, err := BackendVersion(securitySpec.KeyManagementService.ConnectionDetails) - if err != nil { - return errors.Wrap(err, "failed to get backend version") - } - securitySpec.KeyManagementService.ConnectionDetails[vault.VaultBackendKey] = backendVersion - } - } - default: - return errors.Errorf("failed to validate kms provider connection details (provider %q not supported)", provider) - } - - return nil -} - -// SetTokenToEnvVar sets a KMS token as an env variable -func SetTokenToEnvVar(clusterdContext *clusterd.Context, tokenSecretName, provider, namespace string) error { - ctx := context.TODO() - // Get the secret containing the kms token - kmsToken, err := clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, tokenSecretName, metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to fetch kms token secret %q", tokenSecretName) - } - - // We set the token as an env variable, the secrets lib will pick it up - var key, value string - switch provider { - case secrets.TypeVault: - key = api.EnvVaultToken - value = string(kmsToken.Data[KMSTokenSecretNameKey]) - default: - logger.Debugf("unknown provider %q return nil", provider) - return nil - } - - // Set the env variable - err = os.Setenv(key, value) - if err != nil { - return errors.Wrap(err, "failed to set kms token to an env var") - } - - return nil -} diff --git a/pkg/daemon/ceph/osd/kms/kms_test.go b/pkg/daemon/ceph/osd/kms/kms_test.go deleted file mode 100644 index 36cbd1f94..000000000 --- a/pkg/daemon/ceph/osd/kms/kms_test.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kms - -import ( - "context" - "os" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidateConnectionDetails(t *testing.T) { - ctx := context.TODO() - // Placeholder - context := &clusterd.Context{Clientset: test.New(t, 3)} - securitySpec := cephv1.SecuritySpec{KeyManagementService: cephv1.KeyManagementServiceSpec{ConnectionDetails: map[string]string{}}} - ns := "rook-ceph" - - // Error: no token in spec - err := ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to validate kms configuration (missing token in spec)") - - securitySpec.KeyManagementService.TokenSecretName = "vault-token" - - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to fetch kms token secret \"vault-token\": secrets \"vault-token\" not found") - - // Error: token secret present but empty content - s := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: securitySpec.KeyManagementService.TokenSecretName, - Namespace: ns, - }, - } - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, s, metav1.CreateOptions{}) - assert.NoError(t, err) - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to read k8s kms secret \"token\" key \"vault-token\" (not found or empty)") - - // Error: token key does not exist - s.Data = map[string][]byte{"foo": []byte("bar")} - _, err = context.Clientset.CoreV1().Secrets(ns).Update(ctx, s, metav1.UpdateOptions{}) - assert.NoError(t, err) - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to read k8s kms secret \"token\" key \"vault-token\" (not found or empty)") - - // Success: token content is ok - s.Data["token"] = []byte("myt-otkenbenvqrev") - _, err = context.Clientset.CoreV1().Secrets(ns).Update(ctx, s, metav1.UpdateOptions{}) - assert.NoError(t, err) - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to validate kms config \"KMS_PROVIDER\". 
cannot be empty") - securitySpec.KeyManagementService.ConnectionDetails["KMS_PROVIDER"] = "vault" - - // Error: Data has a KMS_PROVIDER but missing details - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to validate vault connection details: failed to find connection details \"VAULT_ADDR\"") - - // Error: connection details are correct but the token secret does not exist - securitySpec.KeyManagementService.ConnectionDetails["VAULT_ADDR"] = "https://1.1.1.1:8200" - securitySpec.KeyManagementService.ConnectionDetails["VAULT_BACKEND"] = "v1" - - // Error: TLS is configured but secrets do not exist - securitySpec.KeyManagementService.ConnectionDetails["VAULT_CACERT"] = "vault-ca-secret" - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to validate vault connection details: failed to find TLS connection details k8s secret \"VAULT_CACERT\"") - - // Error: TLS secret exists but empty key - tlsSecret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-ca-secret", - Namespace: ns, - }, - } - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, tlsSecret, metav1.CreateOptions{}) - assert.NoError(t, err) - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to validate vault connection details: failed to find TLS connection key \"cert\" for \"VAULT_CACERT\" in k8s secret \"vault-ca-secret\"") - - // Success: TLS config is correct - tlsSecret.Data = map[string][]byte{"cert": []byte("envnrevbnbvsbjkrtn")} - _, err = context.Clientset.CoreV1().Secrets(ns).Update(ctx, tlsSecret, metav1.UpdateOptions{}) - assert.NoError(t, err) - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.NoError(t, err, "") -} - -func TestSetTokenToEnvVar(t *testing.T) { - ctx := context.TODO() - context := &clusterd.Context{Clientset: test.New(t, 3)} - secretName := "vault-secret" - ns := "rook-ceph" - s := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: ns, - }, - Data: map[string][]byte{"token": []byte("toto")}, - } - _, err := context.Clientset.CoreV1().Secrets(ns).Create(ctx, s, metav1.CreateOptions{}) - assert.NoError(t, err) - - err = SetTokenToEnvVar(context, secretName, "vault", ns) - assert.NoError(t, err) - assert.Equal(t, os.Getenv("VAULT_TOKEN"), "toto") - os.Unsetenv("VAULT_TOKEN") -} diff --git a/pkg/daemon/ceph/osd/kms/vault.go b/pkg/daemon/ceph/osd/kms/vault.go deleted file mode 100644 index 5948c2fe3..000000000 --- a/pkg/daemon/ceph/osd/kms/vault.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kms - -import ( - "context" - "io/ioutil" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/libopenstorage/secrets" - "github.com/libopenstorage/secrets/vault" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // EtcVaultDir is vault config dir - EtcVaultDir = "/etc/vault" - // VaultSecretEngineKey is the type of secret engine used (kv, transit) - VaultSecretEngineKey = "VAULT_SECRET_ENGINE" - // VaultKVSecretEngineKey is a kv secret engine type - VaultKVSecretEngineKey = "kv" - // VaultTransitSecretEngineKey is a transit secret engine type - VaultTransitSecretEngineKey = "transit" -) - -var ( - vaultMandatoryConnectionDetails = []string{api.EnvVaultAddress} -) - -/* VAULT API INTERNAL VALUES -// Refer to https://pkg.golangclub.com/github.com/hashicorp/vault/api?tab=doc#pkg-constants - const EnvVaultAddress = "VAULT_ADDR" - const EnvVaultAgentAddr = "VAULT_AGENT_ADDR" - const EnvVaultCACert = "VAULT_CACERT" - const EnvVaultCAPath = "VAULT_CAPATH" - const EnvVaultClientCert = "VAULT_CLIENT_CERT" - const EnvVaultClientKey = "VAULT_CLIENT_KEY" - const EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" - const EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" - const EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" - const EnvVaultNamespace = "VAULT_NAMESPACE" - const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" - const EnvVaultWrapTTL = "VAULT_WRAP_TTL" - const EnvVaultMaxRetries = "VAULT_MAX_RETRIES" - const EnvVaultToken = "VAULT_TOKEN" - const EnvVaultMFA = "VAULT_MFA" - const EnvRateLimit = "VAULT_RATE_LIMIT" -*/ - -// InitVault inits the secret store -func InitVault(context *clusterd.Context, namespace string, config map[string]string) (secrets.Secrets, error) { - c := make(map[string]interface{}) - - // So that we don't alter the content of c.config for later iterations - // We just want to swap the name of the TLS config secret name --> file name for the kms lib - oriConfig := make(map[string]string) - for k, v := range config { - oriConfig[k] = v - } - - // Populate TLS config - newConfigWithTLS, err := configTLS(context, namespace, oriConfig) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize vault tls configuration") - } - - // Populate TLS config - for key, value := range newConfigWithTLS { - c[key] = string(value) - } - - // Initialize Vault - v, err := vault.New(c) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize vault secret store") - } - - return v, nil -} - -func configTLS(clusterdContext *clusterd.Context, namespace string, config map[string]string) (map[string]string, error) { - ctx := context.TODO() - for _, tlsOption := range cephv1.VaultTLSConnectionDetails { - tlsSecretName := GetParam(config, tlsOption) - if tlsSecretName == "" { - continue - } - // If the string already has the correct path /etc/vault, we are in provisioner code and all the envs have been populated by the op already - if !strings.Contains(tlsSecretName, EtcVaultDir) { - secret, err := clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, tlsSecretName, v1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "failed to fetch tls k8s secret %q", tlsSecretName) - } - - // Generate a temp file - file, err := ioutil.TempFile("", "") - if err != nil { - return nil, errors.Wrapf(err, "failed to generate temp file for k8s secret %q content", tlsSecretName) - } - - // Write into a file - err = 
ioutil.WriteFile(file.Name(), secret.Data[tlsSecretKeyToCheck(tlsOption)], 0444) - if err != nil { - return nil, errors.Wrapf(err, "failed to write k8s secret %q content to a file", tlsSecretName) - } - - logger.Debugf("replacing %q current content %q with %q", tlsOption, config[tlsOption], file.Name()) - - // update the env var with the path - config[tlsOption] = file.Name() - } else { - logger.Debugf("value of tlsOption %q tlsSecretName is already correct %q", tlsOption, tlsSecretName) - } - } - - return config, nil -} - -func put(v secrets.Secrets, secretName, secretValue string, keyContext map[string]string) error { - // First we must see if the key entry already exists, if it does we do nothing - key, err := get(v, secretName, keyContext) - if err != nil && err != secrets.ErrInvalidSecretId { - return errors.Wrapf(err, "failed to get secret %q in vault", secretName) - } - if key != "" { - logger.Debugf("key %q already exists in vault!", secretName) - return nil - } - - // Build Secret - data := make(map[string]interface{}) - data[secretName] = secretValue - - // #nosec G104 Write the encryption key in Vault - err = v.PutSecret(secretName, data, keyContext) - if err != nil { - return errors.Wrapf(err, "failed to put secret %q in vault", secretName) - } - - return nil -} - -func get(v secrets.Secrets, secretName string, keyContext map[string]string) (string, error) { - // #nosec G104 Write the encryption key in Vault - s, err := v.GetSecret(secretName, keyContext) - if err != nil { - return "", err - } - - return s[secretName].(string), nil -} - -func delete(v secrets.Secrets, secretName string, keyContext map[string]string) error { - // #nosec G104 Write the encryption key in Vault - err := v.DeleteSecret(secretName, keyContext) - if err != nil { - return errors.Wrapf(err, "failed to delete secret %q in vault", secretName) - } - - return nil -} - -func buildKeyContext(config map[string]string) map[string]string { - // Key context is just the Vault namespace, available in the enterprise version only - keyContext := map[string]string{secrets.KeyVaultNamespace: config[api.EnvVaultNamespace]} - vaultNamespace, ok := config[api.EnvVaultNamespace] - if !ok || vaultNamespace == "" { - keyContext = nil - } - - return keyContext -} - -// IsVault determines whether the configured KMS is Vault -func (c *Config) IsVault() bool { - return c.Provider == "vault" -} - -func validateVaultConnectionDetails(clusterdContext *clusterd.Context, ns string, kmsConfig map[string]string) error { - ctx := context.TODO() - for _, option := range vaultMandatoryConnectionDetails { - if GetParam(kmsConfig, option) == "" { - return errors.Errorf("failed to find connection details %q", option) - } - } - - // We do not support a directory with multiple CA since we fetch a k8s Secret and read its content - // So we operate with a single CA only - if GetParam(kmsConfig, api.EnvVaultCAPath) != "" { - return errors.Errorf("failed to validate TLS connection details. %q is not supported. 
use %q instead", api.EnvVaultCAPath, api.EnvVaultCACert) - } - - // Validate potential TLS configuration - for _, tlsOption := range cephv1.VaultTLSConnectionDetails { - tlsSecretName := GetParam(kmsConfig, tlsOption) - if tlsSecretName != "" { - // Fetch the secret - s, err := clusterdContext.Clientset.CoreV1().Secrets(ns).Get(ctx, tlsSecretName, v1.GetOptions{}) - if err != nil { - return errors.Errorf("failed to find TLS connection details k8s secret %q", tlsOption) - } - - // Check the Secret key and its content - keyToCheck := tlsSecretKeyToCheck(tlsOption) - cert, ok := s.Data[keyToCheck] - if !ok || len(cert) == 0 { - return errors.Errorf("failed to find TLS connection key %q for %q in k8s secret %q", keyToCheck, tlsOption, tlsSecretName) - } - } - } - - return nil -} - -func tlsSecretKeyToCheck(tlsOption string) string { - if tlsOption == api.EnvVaultCACert || tlsOption == api.EnvVaultClientCert { - return vaultCACertSecretKeyName - } else if tlsOption == api.EnvVaultClientKey { - return vaultKeySecretKeyName - } - - return "" -} diff --git a/pkg/daemon/ceph/osd/kms/vault_api.go b/pkg/daemon/ceph/osd/kms/vault_api.go deleted file mode 100644 index 469c6e9f5..000000000 --- a/pkg/daemon/ceph/osd/kms/vault_api.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kms - -import ( - "os" - "strings" - - "github.com/libopenstorage/secrets/vault" - "github.com/libopenstorage/secrets/vault/utils" - "github.com/pkg/errors" - - "github.com/hashicorp/vault/api" -) - -const ( - kvVersionKey = "version" - kvVersion1 = "kv" - kvVersion2 = "kv-v2" -) - -// newVaultClient returns a vault client, there is no need for any secretConfig validation -// Since this is called after an already validated call InitVault() -func newVaultClient(secretConfig map[string]string) (*api.Client, error) { - // DefaultConfig uses the environment variables if present. 
- config := api.DefaultConfig() - - // Convert map string to map interface - c := make(map[string]interface{}) - for k, v := range secretConfig { - c[k] = v - } - - // Configure TLS - if err := utils.ConfigureTLS(config, c); err != nil { - return nil, err - } - - // Initialize the vault client - client, err := api.NewClient(config) - if err != nil { - return nil, err - } - - // Set the token if provided, token should be set by ValidateConnectionDetails() if applicable - // api.NewClient() already looks up the token from the environment but we need to set it here and remove potential malformed tokens - client.SetToken(strings.TrimSuffix(os.Getenv(api.EnvVaultToken), "\n")) - - // Set Vault address, was validated by ValidateConnectionDetails() - err = client.SetAddress(strings.TrimSuffix(secretConfig[api.EnvVaultAddress], "\n")) - if err != nil { - return nil, err - } - - return client, nil -} - -func BackendVersion(secretConfig map[string]string) (string, error) { - v1 := "v1" - v2 := "v2" - - backendPath := GetParam(secretConfig, vault.VaultBackendPathKey) - if backendPath == "" { - backendPath = vault.DefaultBackendPath - } - - backend := GetParam(secretConfig, vault.VaultBackendKey) - switch backend { - case kvVersion1, v1: - logger.Info("vault kv secret engine version set to v1") - return v1, nil - case kvVersion2, v2: - logger.Info("vault kv secret engine version set to v2") - return v2, nil - default: - // Initialize Vault client - vaultClient, err := newVaultClient(secretConfig) - if err != nil { - return "", errors.Wrap(err, "failed to initialize vault client") - } - - mounts, err := vaultClient.Sys().ListMounts() - if err != nil { - return "", errors.Wrap(err, "failed to list vault system mounts") - } - - for path, mount := range mounts { - // path is represented as 'path/' - if trimSlash(path) == trimSlash(backendPath) { - version := mount.Options[kvVersionKey] - if version == "2" { - logger.Info("vault kv secret engine version auto-detected to v2") - return v2, nil - } - logger.Info("vault kv secret engine version auto-detected to v1") - return v1, nil - } - } - } - - return "", errors.Errorf("secrets engine with mount path %q not found", backendPath) -} - -func trimSlash(in string) string { - return strings.Trim(in, "/") -} diff --git a/pkg/daemon/ceph/osd/kms/vault_test.go b/pkg/daemon/ceph/osd/kms/vault_test.go deleted file mode 100644 index c7c8e1ffa..000000000 --- a/pkg/daemon/ceph/osd/kms/vault_test.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kms - -import ( - "context" - "testing" - - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func Test_tlsSecretKeyToCheck(t *testing.T) { - type args struct { - tlsOption string - } - tests := []struct { - name string - args args - want string - }{ - {"certificate", args{tlsOption: "VAULT_CACERT"}, "cert"}, - {"client-certificate", args{tlsOption: "VAULT_CLIENT_CERT"}, "cert"}, - {"client-key", args{tlsOption: "VAULT_CLIENT_KEY"}, "key"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tlsSecretKeyToCheck(tt.args.tlsOption); got != tt.want { - t.Errorf("tlsSecretKeyToCheck() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_configTLS(t *testing.T) { - ctx := context.TODO() - config := map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - } - ns := "rook-ceph" - context := &clusterd.Context{Clientset: test.New(t, 3)} - - // No tls config - _, err := configTLS(context, ns, config) - assert.NoError(t, err) - - // TLS config with correct values - config = map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - "VAULT_CACERT": "/etc/vault/cacert", - "VAULT_SKIP_VERIFY": "false", - } - config, err = configTLS(context, ns, config) - assert.NoError(t, err) - assert.Equal(t, "/etc/vault/cacert", config["VAULT_CACERT"]) - - // TLS config but no secret - config = map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - "VAULT_CACERT": "vault-ca-cert", - "VAULT_SKIP_VERIFY": "false", - } - _, err = configTLS(context, ns, config) - assert.Error(t, err) - assert.EqualError(t, err, "failed to fetch tls k8s secret \"vault-ca-cert\": secrets \"vault-ca-cert\" not found") - - // TLS config success! - config = map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - "VAULT_CACERT": "vault-ca-cert", - "VAULT_SKIP_VERIFY": "false", - } - s := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-ca-cert", - Namespace: ns, - }, - Data: map[string][]byte{"cert": []byte("bar")}, - } - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, s, metav1.CreateOptions{}) - assert.NoError(t, err) - config, err = configTLS(context, ns, config) - assert.NoError(t, err) - assert.NotEqual(t, "vault-ca-cert", config["VAULT_CACERT"]) - err = context.Clientset.CoreV1().Secrets(ns).Delete(ctx, s.Name, metav1.DeleteOptions{}) - assert.NoError(t, err) - - // All TLS success! 
- config = map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - "VAULT_CACERT": "vault-ca-cert", - "VAULT_CLIENT_CERT": "vault-client-cert", - "VAULT_CLIENT_KEY": "vault-client-key", - } - sCa := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-ca-cert", - Namespace: ns, - }, - Data: map[string][]byte{"cert": []byte("bar")}, - } - sClCert := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-client-cert", - Namespace: ns, - }, - Data: map[string][]byte{"cert": []byte("bar")}, - } - sClKey := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-client-key", - Namespace: ns, - }, - Data: map[string][]byte{"key": []byte("bar")}, - } - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, sCa, metav1.CreateOptions{}) - assert.NoError(t, err) - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, sClCert, metav1.CreateOptions{}) - assert.NoError(t, err) - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, sClKey, metav1.CreateOptions{}) - assert.NoError(t, err) - config, err = configTLS(context, ns, config) - assert.NoError(t, err) - assert.NotEqual(t, "vault-ca-cert", config["VAULT_CACERT"]) - assert.NotEqual(t, "vault-client-cert", config["VAULT_CLIENT_CERT"]) - assert.NotEqual(t, "vault-client-key", config["VAULT_CLIENT_KEY"]) -} diff --git a/pkg/daemon/ceph/osd/kms/volumes.go b/pkg/daemon/ceph/osd/kms/volumes.go deleted file mode 100644 index 9075b6af0..000000000 --- a/pkg/daemon/ceph/osd/kms/volumes.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kms - -import ( - "github.com/hashicorp/vault/api" - "github.com/libopenstorage/secrets" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/api/core/v1" -) - -const ( - // Key name of the Secret containing the cert and client key - vaultCACertSecretKeyName = "cert" - vaultKeySecretKeyName = "key" - - // File names of the Secret value when mapping on the filesystem - vaultCAFileName = "vault.ca" - vaultCertFileName = "vault.crt" - vaultKeyFileName = "vault.key" - - // File name for token file - VaultFileName = "vault.token" -) - -// TLSSecretVolumeAndMount return the volume and matching volume mount for mounting the secrets into /etc/vault -func TLSSecretVolumeAndMount(config map[string]string) []v1.VolumeProjection { - // Projection list - secretVolumeProjections := []v1.VolumeProjection{} - - // File mode - mode := int32(0400) - - // Vault TLS Secrets - for _, tlsOption := range cephv1.VaultTLSConnectionDetails { - tlsSecretName := GetParam(config, tlsOption) - if tlsSecretName != "" { - projectionSecret := &v1.SecretProjection{Items: []v1.KeyToPath{{Key: tlsSecretKeyToCheck(tlsOption), Path: tlsSecretPath(tlsOption), Mode: &mode}}} - projectionSecret.Name = tlsSecretName - secretProjection := v1.VolumeProjection{Secret: projectionSecret} - secretVolumeProjections = append(secretVolumeProjections, secretProjection) - } - } - - return secretVolumeProjections -} - -// VaultVolumeAndMount returns Vault volume and volume mount -func VaultVolumeAndMount(config map[string]string) (v1.Volume, v1.VolumeMount) { - v := v1.Volume{ - Name: secrets.TypeVault, - VolumeSource: v1.VolumeSource{ - Projected: &v1.ProjectedVolumeSource{ - Sources: TLSSecretVolumeAndMount(config), - }, - }, - } - - m := v1.VolumeMount{ - Name: secrets.TypeVault, - ReadOnly: true, - MountPath: EtcVaultDir, - } - - return v, m -} - -func tlsSecretPath(tlsOption string) string { - switch tlsOption { - case api.EnvVaultCACert: - return vaultCAFileName - case api.EnvVaultClientCert: - return vaultCertFileName - case api.EnvVaultClientKey: - return vaultKeyFileName - - } - - return "" -} - -// VaultTokenFileVolume save token from secret as volume mount -func VaultTokenFileVolume(tokenSecretName string) v1.Volume { - return v1.Volume{ - Name: secrets.TypeVault, - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: tokenSecretName, - Items: []v1.KeyToPath{ - {Key: KMSTokenSecretNameKey, Path: VaultFileName}, - }}}} -} diff --git a/pkg/daemon/ceph/osd/kms/volumes_test.go b/pkg/daemon/ceph/osd/kms/volumes_test.go deleted file mode 100644 index f976159f4..000000000 --- a/pkg/daemon/ceph/osd/kms/volumes_test.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kms - -import ( - "reflect" - "testing" - - v1 "k8s.io/api/core/v1" -) - -func Test_tlsSecretPath(t *testing.T) { - type args struct { - tlsOption string - } - tests := []struct { - name string - args args - want string - }{ - {"certificate", args{tlsOption: "VAULT_CACERT"}, "vault.ca"}, - {"client-certificate", args{tlsOption: "VAULT_CLIENT_CERT"}, "vault.crt"}, - {"client-key", args{tlsOption: "VAULT_CLIENT_KEY"}, "vault.key"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tlsSecretPath(tt.args.tlsOption); got != tt.want { - t.Errorf("tlsSecretPath() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestTLSSecretVolumeAndMount(t *testing.T) { - m := int32(0400) - type args struct { - config map[string]string - } - tests := []struct { - name string - args args - want []v1.VolumeProjection - }{ - {"empty", args{config: map[string]string{"foo": "bar"}}, []v1.VolumeProjection{}}, - {"single ca", args{config: map[string]string{"VAULT_CACERT": "vault-ca-secret"}}, []v1.VolumeProjection{ - {Secret: &v1.SecretProjection{LocalObjectReference: v1.LocalObjectReference{Name: "vault-ca-secret"}, Items: []v1.KeyToPath{{Key: "cert", Path: "vault.ca", Mode: &m}}, Optional: nil}}}, - }, - {"ca and client cert", args{config: map[string]string{"VAULT_CACERT": "vault-ca-secret", "VAULT_CLIENT_CERT": "vault-client-cert"}}, []v1.VolumeProjection{ - {Secret: &v1.SecretProjection{LocalObjectReference: v1.LocalObjectReference{Name: "vault-ca-secret"}, Items: []v1.KeyToPath{{Key: "cert", Path: "vault.ca", Mode: &m}}, Optional: nil}}, - {Secret: &v1.SecretProjection{LocalObjectReference: v1.LocalObjectReference{Name: "vault-client-cert"}, Items: []v1.KeyToPath{{Key: "cert", Path: "vault.crt", Mode: &m}}, Optional: nil}}, - }}, - {"ca and client cert/key", args{config: map[string]string{"VAULT_CACERT": "vault-ca-secret", "VAULT_CLIENT_CERT": "vault-client-cert", "VAULT_CLIENT_KEY": "vault-client-key"}}, []v1.VolumeProjection{ - {Secret: &v1.SecretProjection{LocalObjectReference: v1.LocalObjectReference{Name: "vault-ca-secret"}, Items: []v1.KeyToPath{{Key: "cert", Path: "vault.ca", Mode: &m}}, Optional: nil}}, - {Secret: &v1.SecretProjection{LocalObjectReference: v1.LocalObjectReference{Name: "vault-client-cert"}, Items: []v1.KeyToPath{{Key: "cert", Path: "vault.crt", Mode: &m}}, Optional: nil}}, - {Secret: &v1.SecretProjection{LocalObjectReference: v1.LocalObjectReference{Name: "vault-client-key"}, Items: []v1.KeyToPath{{Key: "key", Path: "vault.key", Mode: &m}}, Optional: nil}}, - }}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := TLSSecretVolumeAndMount(tt.args.config); !reflect.DeepEqual(got, tt.want) { - t.Errorf("TLSSecretVolumeAndMount() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/daemon/ceph/osd/nsenter.go b/pkg/daemon/ceph/osd/nsenter.go deleted file mode 100644 index e44eb867f..000000000 --- a/pkg/daemon/ceph/osd/nsenter.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" -) - -const ( - // mountNsPath is the default mount namespace of the host - mountNsPath = "/rootfs/proc/1/ns/mnt" - // nsenterCmd is the nsenter command - nsenterCmd = "nsenter" - rootFSPath = "/rootfs" -) - -var ( - binPathsToCheck = []string{"/usr/sbin", "/sbin/"} -) - -// NSEnter is an nsenter object -type NSEnter struct { - context *clusterd.Context - binary string - binaryArgs []string -} - -// NewNsenter returns an instance of the NSEnter object -func NewNsenter(context *clusterd.Context, binary string, binaryArgs []string) *NSEnter { - return &NSEnter{ - context: context, - binary: binary, - binaryArgs: binaryArgs, - } -} - -func (ne *NSEnter) buildNsEnterCLI(binPath string) []string { - baseArgs := []string{fmt.Sprintf("--mount=%s", mountNsPath), "--", binPath} - baseArgs = append(baseArgs, ne.binaryArgs...) - - return baseArgs -} - -func (ne *NSEnter) callNsEnter(binPath string) error { - args := ne.buildNsEnterCLI(binPath) - op, err := ne.context.Executor.ExecuteCommandWithCombinedOutput(nsenterCmd, args...) - if err != nil { - return errors.Wrapf(err, "failed to execute nsenter. output: %s", op) - } - - logger.Info("successfully called nsenter") - return nil -} - -func (ne *NSEnter) checkIfBinaryExistsOnHost() error { - for _, path := range binPathsToCheck { - binPath := filepath.Join(path, ne.binary) - // Check with nsenter first - err := ne.callNsEnter(binPath) - if err != nil { - logger.Debugf("failed to call nsenter. %v", err) - // If nsenter failed, let's try with the rootfs directly but only lookup the binary and do not execute it - // This avoids mismatch libraries between the container and the host while executing - rootFSBinPath := filepath.Join(rootFSPath, binPath) - _, err := os.Stat(rootFSBinPath) - if err != nil { - logger.Debugf("failed to lookup binary path %q on the host rootfs. %v", rootFSBinPath, err) - continue - } - binPath = rootFSBinPath - } - logger.Infof("binary %q found on the host, proceeding with osd preparation", binPath) - return nil - } - - return errors.Errorf("binary %q does not exist on the host", ne.binary) -} diff --git a/pkg/daemon/ceph/osd/nsenter_test.go b/pkg/daemon/ceph/osd/nsenter_test.go deleted file mode 100644 index f6c70cf55..000000000 --- a/pkg/daemon/ceph/osd/nsenter_test.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "path/filepath" - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestBuildNsEnterCLI(t *testing.T) { - ne := NewNsenter(&clusterd.Context{}, lvmCommandToCheck, []string{"help"}) - args := ne.buildNsEnterCLI(filepath.Join("/sbin/", ne.binary)) - expectedCLI := []string{"--mount=/rootfs/proc/1/ns/mnt", "--", "/sbin/lvm", "help"} - - assert.Equal(t, expectedCLI, args) -} - -func TestCheckIfBinaryExistsOnHost(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if command == "nsenter" && args[0] == "--mount=/rootfs/proc/1/ns/mnt" && args[1] == "--" && args[3] == "help" { - if args[2] == "/usr/sbin/lvm" || args[2] == "/sbin/lvm" { - return "success", nil - } - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor} - ne := NewNsenter(context, lvmCommandToCheck, []string{"help"}) - err := ne.checkIfBinaryExistsOnHost() - assert.NoError(t, err) -} diff --git a/pkg/daemon/ceph/osd/remove.go b/pkg/daemon/ceph/osd/remove.go deleted file mode 100644 index a4f032666..000000000 --- a/pkg/daemon/ceph/osd/remove.go +++ /dev/null @@ -1,183 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "context" - "fmt" - "strconv" - - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - "github.com/rook/rook/pkg/operator/k8sutil" -) - -// RemoveOSDs purges a list of OSDs from the cluster -func RemoveOSDs(context *clusterd.Context, clusterInfo *client.ClusterInfo, osdsToRemove []string, preservePVC bool) error { - - // Generate the ceph config for running ceph commands similar to the operator - if err := client.WriteCephConfig(context, clusterInfo); err != nil { - return errors.Wrap(err, "failed to write the ceph config") - } - - osdDump, err := client.GetOSDDump(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get osd dump") - } - - for _, osdIDStr := range osdsToRemove { - osdID, err := strconv.Atoi(osdIDStr) - if err != nil { - logger.Errorf("invalid OSD ID: %s. %v", osdIDStr, err) - continue - } - logger.Infof("validating status of osd.%d", osdID) - status, _, err := osdDump.StatusByID(int64(osdID)) - if err != nil { - return errors.Wrapf(err, "failed to get osd status for osd %d", osdID) - } - const upStatus int64 = 1 - if status == upStatus { - logger.Infof("osd.%d is healthy. It cannot be removed unless it is 'down'", osdID) - continue - } - logger.Infof("osd.%d is marked 'DOWN'. 
Removing it", osdID) - removeOSD(context, clusterInfo, osdID, preservePVC) - } - - return nil -} - -func removeOSD(clusterdContext *clusterd.Context, clusterInfo *client.ClusterInfo, osdID int, preservePVC bool) { - ctx := context.TODO() - // Get the host where the OSD is found - hostName, err := client.GetCrushHostName(clusterdContext, clusterInfo, osdID) - if err != nil { - logger.Errorf("failed to get the host where osd.%d is running. %v", osdID, err) - } - - // Mark the OSD as out. - args := []string{"osd", "out", fmt.Sprintf("osd.%d", osdID)} - _, err = client.NewCephCommand(clusterdContext, clusterInfo, args).Run() - if err != nil { - logger.Errorf("failed to exclude osd.%d out of the crush map. %v", osdID, err) - } - - // Remove the OSD deployment - deploymentName := fmt.Sprintf("rook-ceph-osd-%d", osdID) - deployment, err := clusterdContext.Clientset.AppsV1().Deployments(clusterInfo.Namespace).Get(ctx, deploymentName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("failed to fetch the deployment %q. %v", deploymentName, err) - } else { - logger.Infof("removing the OSD deployment %q", deploymentName) - if err := k8sutil.DeleteDeployment(clusterdContext.Clientset, clusterInfo.Namespace, deploymentName); err != nil { - if err != nil { - // Continue purging the OSD even if the deployment fails to be deleted - logger.Errorf("failed to delete deployment for OSD %d. %v", osdID, err) - } - } - if pvcName, ok := deployment.GetLabels()[osd.OSDOverPVCLabelKey]; ok { - labelSelector := fmt.Sprintf("%s=%s", osd.OSDOverPVCLabelKey, pvcName) - prepareJobList, err := clusterdContext.Clientset.BatchV1().Jobs(clusterInfo.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) - if err != nil && !kerrors.IsNotFound(err) { - logger.Errorf("failed to list osd prepare jobs with pvc %q. %v ", pvcName, err) - } - // Remove osd prepare job - for _, prepareJob := range prepareJobList.Items { - logger.Infof("removing the osd prepare job %q", prepareJob.GetName()) - if err := k8sutil.DeleteBatchJob(clusterdContext.Clientset, clusterInfo.Namespace, prepareJob.GetName(), false); err != nil { - if err != nil { - // Continue deleting the OSD prepare job even if the deployment fails to be deleted - logger.Errorf("failed to delete prepare job for osd %q. %v", prepareJob.GetName(), err) - } - } - } - if preservePVC { - // Detach the OSD PVC from Rook. We will continue OSD deletion even if failed to remove PVC label - logger.Infof("detach the OSD PVC %q from Rook", pvcName) - if pvc, err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(clusterInfo.Namespace).Get(ctx, pvcName, metav1.GetOptions{}); err != nil { - logger.Errorf("failed to get pvc for OSD %q. %v", pvcName, err) - } else { - labels := pvc.GetLabels() - delete(labels, osd.CephDeviceSetPVCIDLabelKey) - pvc.SetLabels(labels) - if _, err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(clusterInfo.Namespace).Update(ctx, pvc, metav1.UpdateOptions{}); err != nil { - logger.Errorf("failed to remove label %q from pvc for OSD %q. %v", osd.CephDeviceSetPVCIDLabelKey, pvcName, err) - } - } - } else { - // Remove the OSD PVC - logger.Infof("removing the OSD PVC %q", pvcName) - if err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(clusterInfo.Namespace).Delete(ctx, pvcName, metav1.DeleteOptions{}); err != nil { - if err != nil { - // Continue deleting the OSD PVC even if PVC deletion fails - logger.Errorf("failed to delete pvc for OSD %q. 
%v", pvcName, err) - } - } - } - } else { - logger.Infof("did not find a pvc name to remove for osd %q", deploymentName) - } - } - - // purge the osd - purgeosdargs := []string{"osd", "purge", fmt.Sprintf("osd.%d", osdID), "--force", "--yes-i-really-mean-it"} - _, err = client.NewCephCommand(clusterdContext, clusterInfo, purgeosdargs).Run() - if err != nil { - logger.Errorf("failed to purge osd.%d. %v", osdID, err) - } - - // Attempting to remove the parent host. Errors can be ignored if there are other OSDs on the same host - hostargs := []string{"osd", "crush", "rm", hostName} - _, err = client.NewCephCommand(clusterdContext, clusterInfo, hostargs).Run() - if err != nil { - logger.Errorf("failed to remove CRUSH host %q. %v", hostName, err) - } - // call archiveCrash to silence crash warning in ceph health if any - archiveCrash(clusterdContext, clusterInfo, osdID) - - logger.Infof("completed removal of OSD %d", osdID) -} - -func archiveCrash(clusterdContext *clusterd.Context, clusterInfo *client.ClusterInfo, osdID int) { - // The ceph health warning should be silenced by archiving the crash - crash, err := client.GetCrash(clusterdContext, clusterInfo) - if err != nil { - logger.Errorf("failed to list ceph crash. %v", err) - return - } - if crash != nil { - logger.Info("no ceph crash to silence") - return - } - var crashID string - for _, c := range crash { - if c.Entity == fmt.Sprintf("osd.%d", osdID) { - crashID = c.ID - break - } - } - err = client.ArchiveCrash(clusterdContext, clusterInfo, crashID) - if err != nil { - logger.Errorf("failed to archive the crash %q. %v", crashID, err) - } -} diff --git a/pkg/daemon/ceph/osd/volume.go b/pkg/daemon/ceph/osd/volume.go deleted file mode 100644 index 0dda7c356..000000000 --- a/pkg/daemon/ceph/osd/volume.go +++ /dev/null @@ -1,1147 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/util/display" - "github.com/rook/rook/pkg/util/sys" -) - -const ( - osdsPerDeviceFlag = "--osds-per-device" - crushDeviceClassFlag = "--crush-device-class" - encryptedFlag = "--dmcrypt" - databaseSizeFlag = "--block-db-size" - dbDeviceFlag = "--db-devices" - cephVolumeCmd = "ceph-volume" - cephVolumeMinDBSize = 1024 // 1GB -) - -// These are not constants because they are used by the tests -var ( - cephConfigDir = "/var/lib/ceph" - cephLogDir = "/var/log/ceph" - lvmConfPath = "/etc/lvm/lvm.conf" - cvLogDir = "" - // The "ceph-volume raw" command is available since Ceph 14.2.8 as well as partition support in ceph-volume - cephVolumeRawModeMinCephVersion = cephver.CephVersion{Major: 14, Minor: 2, Extra: 8} - // The Ceph Nautilus to include a retry to acquire device lock - cephFlockFixNautilusMinCephVersion = cephver.CephVersion{Major: 14, Minor: 2, Extra: 14} - // The Ceph Octopus to include a retry to acquire device lock - cephFlockFixOctopusMinCephVersion = cephver.CephVersion{Major: 15, Minor: 2, Extra: 9} - isEncrypted = os.Getenv(oposd.EncryptedDeviceEnvVarName) == "true" - isOnPVC = os.Getenv(oposd.PVCBackedOSDVarName) == "true" -) - -type osdInfoBlock struct { - CephFsid string `json:"ceph_fsid"` - Device string `json:"device"` - OsdID int `json:"osd_id"` - OsdUUID string `json:"osd_uuid"` - Type string `json:"type"` -} - -type osdInfo struct { - Name string `json:"name"` - Path string `json:"path"` - Tags osdTags `json:"tags"` - // "block" for bluestore - Type string `json:"type"` -} - -type osdTags struct { - OSDFSID string `json:"ceph.osd_fsid"` - Encrypted string `json:"ceph.encrypted"` - ClusterFSID string `json:"ceph.cluster_fsid"` - CrushDeviceClass string `json:"ceph.crush_device_class"` -} - -type cephVolReport struct { - Changed bool `json:"changed"` - Vg cephVolVg `json:"vg"` -} - -type cephVolVg struct { - Devices string `json:"devices"` -} - -type cephVolReportV2 struct { - BlockDB string `json:"block_db"` - Encryption string `json:"encryption"` - Data string `json:"data"` - DatabaseSize string `json:"data_size"` - BlockDbSize string `json:"block_db_size"` -} - -func isNewStyledLvmBatch(version cephver.CephVersion) bool { - if version.IsNautilus() && version.IsAtLeast(cephver.CephVersion{Major: 14, Minor: 2, Extra: 13}) { - return true - } - - if version.IsOctopus() && version.IsAtLeast(cephver.CephVersion{Major: 15, Minor: 2, Extra: 8}) { - return true - } - - if version.IsAtLeastPacific() { - return true - } - - return false -} - -func (a *OsdAgent) configureCVDevices(context *clusterd.Context, devices *DeviceOsdMapping) ([]oposd.OSDInfo, error) { - var osds []oposd.OSDInfo - var lvmOsds []oposd.OSDInfo - var rawOsds []oposd.OSDInfo - var lvBackedPV bool - var block, lvPath, metadataBlock, walBlock string - var err error - - // Idempotency check, if the device list is empty devices have been prepared already - // In this case, just return the OSDInfo via a 'ceph-volume lvm|raw list' call - if len(devices.Entries) == 0 { - logger.Info("no new devices to configure. 
returning devices already configured with ceph-volume.") - - if a.pvcBacked { - // So many things changed and it's good to remember this commit and its logic - // See: https://github.com/rook/rook/commit/8ea693a74011c587970dfc28a3d9efe2ef329159 - skipLVRelease := true - - // For LV mode - lvPath = getDeviceLVPath(context, fmt.Sprintf("/mnt/%s", a.nodeName)) - lvBackedPV, err := sys.IsLV(fmt.Sprintf("/mnt/%s", a.nodeName), context.Executor) - if err != nil { - return nil, errors.Wrap(err, "failed to check device type") - } - - // List THE existing OSD configured with ceph-volume lvm mode - lvmOsds, err = GetCephVolumeLVMOSDs(context, a.clusterInfo, a.clusterInfo.FSID, lvPath, skipLVRelease, lvBackedPV) - if err != nil { - logger.Infof("failed to get device already provisioned by ceph-volume lvm. %v", err) - } - osds = append(osds, lvmOsds...) - if len(osds) > 0 { - // "ceph-volume raw list" lists the existing OSD even if it is configured with lvm mode, so escape here to avoid dupe. - return osds, nil - } - - // List THE existing OSD configured with ceph-volume raw mode - if a.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawModeMinCephVersion) { - // For block mode - block = fmt.Sprintf("/mnt/%s", a.nodeName) - - // This is hard to determine a potential metadata device here - // Also, I don't think (leseb) this code we have run in this condition - // I tried several things: - // * evict a node, osd moves, the prepare job was never relaunched ever because we check for the osd deployment and skip the prepare - // * restarted the operator, again the prepare job was not re-run - // - // I'm leaving this code with an empty metadata device for now - metadataBlock, walBlock = "", "" - - rawOsds, err = GetCephVolumeRawOSDs(context, a.clusterInfo, a.clusterInfo.FSID, block, metadataBlock, walBlock, lvBackedPV, false) - if err != nil { - logger.Infof("failed to get device already provisioned by ceph-volume raw. %v", err) - } - osds = append(osds, rawOsds...) - } - - return osds, nil - } - - // List existing OSD(s) configured with ceph-volume lvm mode - lvmOsds, err = GetCephVolumeLVMOSDs(context, a.clusterInfo, a.clusterInfo.FSID, lvPath, false, false) - if err != nil { - logger.Infof("failed to get devices already provisioned by ceph-volume. %v", err) - } - osds = append(osds, lvmOsds...) - - // List existing OSD(s) configured with ceph-volume raw mode - rawOsds, err = GetCephVolumeRawOSDs(context, a.clusterInfo, a.clusterInfo.FSID, block, "", "", false, false) - if err != nil { - logger.Infof("failed to get device already provisioned by ceph-volume raw. %v", err) - } - osds = appendOSDInfo(osds, rawOsds) - - return osds, nil - } - - // Create OSD bootstrap keyring - err = createOSDBootstrapKeyring(context, a.clusterInfo, cephConfigDir) - if err != nil { - return nil, errors.Wrap(err, "failed to generate osd keyring") - } - - // Check if the PVC is an LVM block device (certain StorageClass do this) - if a.pvcBacked { - for _, device := range devices.Entries { - dev := device.Config.Name - lvBackedPV, err = sys.IsLV(dev, context.Executor) - if err != nil { - return nil, errors.Wrap(err, "failed to check device type") - } - break - } - } - - // Should we use ceph-volume raw mode? 
- useRawMode, err := a.useRawMode(context, a.pvcBacked) - if err != nil { - return nil, errors.Wrap(err, "failed to determine which ceph-volume mode to use") - } - - // If not raw mode we must execute a few LVM prerequisites - if !useRawMode { - err = lvmPreReq(context, a.pvcBacked, lvBackedPV) - if err != nil { - return nil, errors.Wrap(err, "failed to run lvm prerequisites") - } - } - - // If running on OSD on PVC - if a.pvcBacked { - if block, metadataBlock, walBlock, err = a.initializeBlockPVC(context, devices, lvBackedPV); err != nil { - return nil, errors.Wrap(err, "failed to initialize devices on PVC") - } - } else { - err := a.initializeDevices(context, devices, useRawMode) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize osd") - } - } - - // List OSD configured with ceph-volume lvm mode - lvmOsds, err = GetCephVolumeLVMOSDs(context, a.clusterInfo, a.clusterInfo.FSID, block, false, lvBackedPV) - if err != nil { - return nil, errors.Wrap(err, "failed to get devices already provisioned by ceph-volume lvm") - } - osds = append(osds, lvmOsds...) - - // List THE configured OSD with ceph-volume raw mode - // When the block is encrypted we need to list against the encrypted device mapper - if !isEncrypted { - block = fmt.Sprintf("/mnt/%s", a.nodeName) - } - // List ALL OSDs when not running on PVC - if !a.pvcBacked { - block = "" - } - rawOsds, err = GetCephVolumeRawOSDs(context, a.clusterInfo, a.clusterInfo.FSID, block, metadataBlock, walBlock, lvBackedPV, false) - if err != nil { - return nil, errors.Wrap(err, "failed to get devices already provisioned by ceph-volume raw") - } - osds = appendOSDInfo(osds, rawOsds) - - return osds, err -} - -func (a *OsdAgent) initializeBlockPVC(context *clusterd.Context, devices *DeviceOsdMapping, lvBackedPV bool) (string, string, string, error) { - // we need to return the block if raw mode is used and the lv if lvm mode - baseCommand := "stdbuf" - var baseArgs []string - - cephVolumeMode := "lvm" - if a.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawModeMinCephVersion) { - cephVolumeMode = "raw" - } else if lvBackedPV { - return "", "", "", errors.New("OSD on LV-backed PVC requires new Ceph to use raw mode") - } - - // Create a specific log directory so that each prepare command will have its own log - // Only do this if nothing is present so that we don't override existing logs - cvLogDir = path.Join(cephLogDir, a.nodeName) - err := os.MkdirAll(cvLogDir, 0750) - if err != nil { - logger.Errorf("failed to create ceph-volume log directory %q, continue with default %q. %v", cvLogDir, cephLogDir, err) - baseArgs = []string{"-oL", cephVolumeCmd, cephVolumeMode, "prepare", "--bluestore"} - } else { - // Always force Bluestore! - baseArgs = []string{"-oL", cephVolumeCmd, "--log-path", cvLogDir, cephVolumeMode, "prepare", "--bluestore"} - } - - var metadataArg, walArg []string - var metadataDev, walDev bool - var blockPath, metadataBlockPath, walBlockPath string - - // Problem: map is an unordered collection - // therefore the iteration order of a map is not guaranteed to be the same every time you iterate over it. 
- // So we could first get the metadata device and then the main block in a scenario where a metadata PVC is present - for name, device := range devices.Entries { - // If this is the metadata device there is nothing to do - // it'll be used in one of the iterations - if name == "metadata" || name == "wal" { - logger.Debugf("device %q is a metadata or wal device, skipping this iteration it will be used in the next one", device.Config.Name) - // Don't do this device - continue - } - - // When running on PVC, the prepare job has a single OSD only so 1 disk - // However we can present a metadata device so we need to consume it - // This will make the devices.Entries larger than usual - if _, ok := devices.Entries["metadata"]; ok { - metadataDev = true - metadataArg = append(metadataArg, []string{"--block.db", - devices.Entries["metadata"].Config.Name, - }...) - - metadataBlockPath = devices.Entries["metadata"].Config.Name - } - - if _, ok := devices.Entries["wal"]; ok { - walDev = true - walArg = append(walArg, []string{"--block.wal", - devices.Entries["wal"].Config.Name, - }...) - - walBlockPath = devices.Entries["wal"].Config.Name - } - - if device.Data == -1 { - logger.Infof("configuring new device %q", device.Config.Name) - var err error - var deviceArg string - - deviceArg = device.Config.Name - logger.Info("devlink names:") - for _, devlink := range device.PersistentDevicePaths { - logger.Info(devlink) - if strings.HasPrefix(devlink, "/dev/mapper") { - deviceArg = devlink - } - } - - immediateExecuteArgs := append(baseArgs, []string{ - "--data", - deviceArg, - }...) - - crushDeviceClass := os.Getenv(oposd.CrushDeviceClassVarName) - if crushDeviceClass != "" { - immediateExecuteArgs = append(immediateExecuteArgs, []string{crushDeviceClassFlag, crushDeviceClass}...) - } - - if isEncrypted { - immediateExecuteArgs = append(immediateExecuteArgs, encryptedFlag) - } - - // Add the cli argument for the metadata device - if metadataDev { - immediateExecuteArgs = append(immediateExecuteArgs, metadataArg...) - } - - // Add the cli argument for the wal device - if walDev { - immediateExecuteArgs = append(immediateExecuteArgs, walArg...) - } - - // execute ceph-volume with the device - op, err := context.Executor.ExecuteCommandWithCombinedOutput(baseCommand, immediateExecuteArgs...) - if err != nil { - cvLogFilePath := path.Join(cvLogDir, "ceph-volume.log") - - // Print c-v log before exiting - cvLog := readCVLogContent(cvLogFilePath) - if cvLog != "" { - logger.Errorf("%s", cvLog) - } - - // Return failure - return "", "", "", errors.Wrapf(err, "failed to run ceph-volume. %s. 
debug logs below:\n%s", op, cvLog) - } - logger.Infof("%v", op) - // if raw mode is used or PV on LV, let's return the path of the device - if cephVolumeMode == "raw" && !isEncrypted { - blockPath = deviceArg - } else if cephVolumeMode == "raw" && isEncrypted { - blockPath = getEncryptedBlockPath(op, oposd.DmcryptBlockType) - if blockPath == "" { - return "", "", "", errors.New("failed to get encrypted block path from ceph-volume lvm prepare output") - } - if metadataDev { - metadataBlockPath = getEncryptedBlockPath(op, oposd.DmcryptMetadataType) - if metadataBlockPath == "" { - return "", "", "", errors.New("failed to get encrypted block.db path from ceph-volume lvm prepare output") - } - } - if walDev { - walBlockPath = getEncryptedBlockPath(op, oposd.DmcryptWalType) - if walBlockPath == "" { - return "", "", "", errors.New("failed to get encrypted block.wal path from ceph-volume lvm prepare output") - } - } - } else { - blockPath = getLVPath(op) - if blockPath == "" { - return "", "", "", errors.New("failed to get lv path from ceph-volume lvm prepare output") - } - } - } else { - logger.Infof("skipping device %q with osd %d already configured", device.Config.Name, device.Data) - } - } - - return blockPath, metadataBlockPath, walBlockPath, nil -} - -func getLVPath(op string) string { - tmp := sys.Grep(op, "Volume group") - vgtmp := strings.Split(tmp, "\"") - - tmp = sys.Grep(op, "Logical volume") - lvtmp := strings.Split(tmp, "\"") - - if len(vgtmp) >= 2 && len(lvtmp) >= 2 { - if sys.Grep(vgtmp[1], "ceph") != "" && sys.Grep(lvtmp[1], "osd-block") != "" { - return fmt.Sprintf("/dev/%s/%s", vgtmp[1], lvtmp[1]) - } - } - return "" -} - -func getEncryptedBlockPath(op, blockType string) string { - re := regexp.MustCompile("(?m)^.*luksOpen.*$") - matches := re.FindAllString(op, -1) - - for _, line := range matches { - lineSlice := strings.Fields(line) - for _, word := range lineSlice { - if strings.Contains(word, blockType) { - return fmt.Sprintf("/dev/mapper/%s", word) - } - } - } - - return "" -} - -// UpdateLVMConfig updates the lvm.conf file -func UpdateLVMConfig(context *clusterd.Context, onPVC, lvBackedPV bool) error { - - input, err := ioutil.ReadFile(lvmConfPath) - if err != nil { - return errors.Wrapf(err, "failed to read lvm config file %q", lvmConfPath) - } - - output := bytes.Replace(input, []byte("udev_sync = 1"), []byte("udev_sync = 0"), 1) - output = bytes.Replace(output, []byte("allow_changes_with_duplicate_pvs = 0"), []byte("allow_changes_with_duplicate_pvs = 1"), 1) - output = bytes.Replace(output, []byte("udev_rules = 1"), []byte("udev_rules = 0"), 1) - output = bytes.Replace(output, []byte("use_lvmetad = 1"), []byte("use_lvmetad = 0"), 1) - output = bytes.Replace(output, []byte("obtain_device_list_from_udev = 1"), []byte("obtain_device_list_from_udev = 0"), 1) - - // When running on PVC - if onPVC { - output = bytes.Replace(output, []byte(`scan = [ "/dev" ]`), []byte(`scan = [ "/dev", "/mnt" ]`), 1) - // Only filter blocks in /mnt, when running on PVC we copy the PVC claim path to /mnt - // And reject everything else - // We have 2 different regex depending on the version of LVM present in the container... - // Since https://github.com/lvmteam/lvm2/commit/08396b4bce45fb8311979250623f04ec0ddb628c#diff-13c602a6258e57ce666a240e67c44f38 - // the content changed, so depending which version is installled one of the two replace will work - if lvBackedPV { - // ceph-volume calls lvs to locate given "vg/lv", so allow "/dev" here. 
However, ignore loopback devices - output = bytes.Replace(output, []byte(`# filter = [ "a|.*/|" ]`), []byte(`filter = [ "a|^/mnt/.*|", "r|^/dev/loop.*|", "a|^/dev/.*|", "r|.*|" ]`), 1) - output = bytes.Replace(output, []byte(`# filter = [ "a|.*|" ]`), []byte(`filter = [ "a|^/mnt/.*|", "r|^/dev/loop.*|", "a|^/dev/.*|", "r|.*|" ]`), 1) - } else { - output = bytes.Replace(output, []byte(`# filter = [ "a|.*/|" ]`), []byte(`filter = [ "a|^/mnt/.*|", "r|.*|" ]`), 1) - output = bytes.Replace(output, []byte(`# filter = [ "a|.*|" ]`), []byte(`filter = [ "a|^/mnt/.*|", "r|.*|" ]`), 1) - } - } - - if err = ioutil.WriteFile(lvmConfPath, output, 0600); err != nil { - return errors.Wrapf(err, "failed to update lvm config file %q", lvmConfPath) - } - - logger.Infof("Successfully updated lvm config file %q", lvmConfPath) - return nil -} - -func (a *OsdAgent) useRawMode(context *clusterd.Context, pvcBacked bool) (bool, error) { - if pvcBacked { - return a.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawModeMinCephVersion), nil - } - - var useRawMode bool - // Can we safely use ceph-volume raw mode in the non-PVC case? - // On non-PVC we see a race between systemd-udev and the osd process to acquire the lock on the device - if a.clusterInfo.CephVersion.IsNautilus() && a.clusterInfo.CephVersion.IsAtLeast(cephFlockFixNautilusMinCephVersion) { - logger.Debugf("will use raw mode since cluster version is at least %v", cephFlockFixNautilusMinCephVersion) - useRawMode = true - } - - if a.clusterInfo.CephVersion.IsOctopus() && a.clusterInfo.CephVersion.IsAtLeast(cephFlockFixOctopusMinCephVersion) { - logger.Debugf("will use raw mode since cluster version is at least %v", cephFlockFixOctopusMinCephVersion) - useRawMode = true - } - - if a.clusterInfo.CephVersion.IsAtLeastPacific() { - logger.Debug("will use raw mode since cluster version is at least pacific") - useRawMode = true - } - - // ceph-volume raw mode does not support encryption yet - if a.storeConfig.EncryptedDevice { - logger.Debug("won't use raw mode since encryption is enabled") - useRawMode = false - } - - // ceph-volume raw mode does not support more than one OSD per disk - osdsPerDeviceCountString := sanitizeOSDsPerDevice(a.storeConfig.OSDsPerDevice) - osdsPerDeviceCount, err := strconv.Atoi(osdsPerDeviceCountString) - if err != nil { - return false, errors.Wrapf(err, "failed to convert string %q to integer", osdsPerDeviceCountString) - } - if osdsPerDeviceCount > 1 { - logger.Debugf("won't use raw mode since osd per device is %d", osdsPerDeviceCount) - useRawMode = false - } - - // ceph-volume raw mode mode does not support metadata device if not running on PVC because the user has specified a whole device - if a.metadataDevice != "" { - logger.Debugf("won't use raw mode since there is a metadata device %q", a.metadataDevice) - useRawMode = false - } - - return useRawMode, nil -} - -func (a *OsdAgent) initializeDevices(context *clusterd.Context, devices *DeviceOsdMapping, allowRawMode bool) error { - // it's a little strange to split this into parts, looping here and in the init functions, but - // the LVM mode init requires the ability to loop over all the devices looking for metadata. - rawDevices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{}, - } - lvmDevices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{}, - } - - for name, device := range devices.Entries { - // Even if we can use raw mode, do NOT use raw mode on disks. 
Ceph bluestore disks can - // sometimes appear as though they have "phantom" Atari (AHDI) partitions created on them - // when they don't in reality. This is due to a series of bugs in the Linux kernel when it - // is built with Atari support enabled. This behavior does not appear for raw mode OSDs on - // partitions, and we need the raw mode to create partition-based OSDs. We cannot merely - // skip creating OSDs on "phantom" partitions due to a bug in `ceph-volume raw inventory` - // which reports only the phantom partitions (and malformed OSD info) when they exist and - // ignores the original (correct) OSDs created on the raw disk. - // See: https://github.com/rook/rook/issues/7940 - if device.DeviceInfo.Type != sys.DiskType && allowRawMode { - rawDevices.Entries[name] = device - continue - } - lvmDevices.Entries[name] = device - } - - err := a.initializeDevicesRawMode(context, rawDevices) - if err != nil { - return err - } - - err = a.initializeDevicesLVMMode(context, lvmDevices) - if err != nil { - return err - } - - return nil -} - -func (a *OsdAgent) initializeDevicesRawMode(context *clusterd.Context, devices *DeviceOsdMapping) error { - baseCommand := "stdbuf" - cephVolumeMode := "raw" - baseArgs := []string{"-oL", cephVolumeCmd, cephVolumeMode, "prepare", "--bluestore"} - - for name, device := range devices.Entries { - deviceArg := path.Join("/dev", name) - if device.Data == -1 { - logger.Infof("configuring new raw device %q", deviceArg) - - immediateExecuteArgs := append(baseArgs, []string{ - "--data", - deviceArg, - }...) - - // assign the device class specific to the device - immediateExecuteArgs = a.appendDeviceClassArg(device, immediateExecuteArgs) - - // execute ceph-volume with the device - op, err := context.Executor.ExecuteCommandWithCombinedOutput(baseCommand, immediateExecuteArgs...) - if err != nil { - cvLogFilePath := path.Join(cephLogDir, "ceph-volume.log") - - // Print c-v log before exiting - cvLog := readCVLogContent(cvLogFilePath) - if cvLog != "" { - logger.Errorf("%s", cvLog) - } - - // Return failure - return errors.Wrapf(err, "failed to run ceph-volume raw command. 
%s", op) // fail return here as validation provided by ceph-volume - } - logger.Infof("%v", op) - } else { - logger.Infof("skipping device %q with osd %d already configured", deviceArg, device.Data) - } - } - - return nil -} - -func (a *OsdAgent) initializeDevicesLVMMode(context *clusterd.Context, devices *DeviceOsdMapping) error { - storeFlag := "--bluestore" - - logPath := "/tmp/ceph-log" - if err := os.MkdirAll(logPath, 0700); err != nil { - return errors.Wrapf(err, "failed to create dir %q", logPath) - } - - // Use stdbuf to capture the python output buffer such that we can write to the pod log as the logging happens - // instead of using the default buffering that will log everything after ceph-volume exits - baseCommand := "stdbuf" - baseArgs := []string{"-oL", cephVolumeCmd, "--log-path", logPath, "lvm", "batch", "--prepare", storeFlag, "--yes"} - if a.storeConfig.EncryptedDevice { - baseArgs = append(baseArgs, encryptedFlag) - } - - osdsPerDeviceCount := sanitizeOSDsPerDevice(a.storeConfig.OSDsPerDevice) - batchArgs := baseArgs - - metadataDevices := make(map[string]map[string]string) - for name, device := range devices.Entries { - if device.Data == -1 { - if device.Metadata != nil { - logger.Infof("skipping metadata device %s config since it will be configured with a data device", name) - continue - } - - logger.Infof("configuring new LVM device %s", name) - deviceArg := path.Join("/dev", name) - // ceph-volume prefers to use /dev/mapper/ if the device has this kind of alias - for _, devlink := range device.PersistentDevicePaths { - if strings.HasPrefix(devlink, "/dev/mapper") { - deviceArg = devlink - } - } - - deviceOSDCount := osdsPerDeviceCount - if device.Config.OSDsPerDevice > 1 { - deviceOSDCount = sanitizeOSDsPerDevice(device.Config.OSDsPerDevice) - } - - if a.metadataDevice != "" || device.Config.MetadataDevice != "" { - // When mixed hdd/ssd devices are given, ceph-volume configures db lv on the ssd. - // the device will be configured as a batch at the end of the method - md := a.metadataDevice - if device.Config.MetadataDevice != "" { - md = device.Config.MetadataDevice - } - logger.Infof("using %s as metadataDevice for device %s and let ceph-volume lvm batch decide how to create volumes", md, deviceArg) - if _, ok := metadataDevices[md]; ok { - // Fail when two devices using the same metadata device have different values for osdsPerDevice - metadataDevices[md]["devices"] += " " + deviceArg - if deviceOSDCount != metadataDevices[md]["osdsperdevice"] { - return errors.Errorf("metadataDevice (%s) has more than 1 osdsPerDevice value set: %s != %s", md, deviceOSDCount, metadataDevices[md]["osdsperdevice"]) - } - } else { - metadataDevices[md] = make(map[string]string) - metadataDevices[md]["osdsperdevice"] = deviceOSDCount - if device.Config.DeviceClass != "" { - metadataDevices[md]["deviceclass"] = device.Config.DeviceClass - } - metadataDevices[md]["devices"] = deviceArg - } - deviceDBSizeMB := getDatabaseSize(a.storeConfig.DatabaseSizeMB, device.Config.DatabaseSizeMB) - if storeFlag == "--bluestore" && deviceDBSizeMB > 0 { - if deviceDBSizeMB < cephVolumeMinDBSize { - // ceph-volume will convert this value to ?G. It needs to be > 1G to invoke lvcreate. - logger.Infof("skipping databaseSizeMB setting (%d). 
For it should be larger than %dMB.", deviceDBSizeMB, cephVolumeMinDBSize) - } else { - dbSizeString := strconv.FormatUint(display.MbTob(uint64(deviceDBSizeMB)), 10) - if _, ok := metadataDevices[md]["databasesizemb"]; ok { - if metadataDevices[md]["databasesizemb"] != dbSizeString { - return errors.Errorf("metadataDevice (%s) has more than 1 databaseSizeMB value set: %s != %s", md, metadataDevices[md]["databasesizemb"], dbSizeString) - } - } else { - metadataDevices[md]["databasesizemb"] = dbSizeString - } - } - } - } else { - immediateExecuteArgs := append(baseArgs, []string{ - osdsPerDeviceFlag, - deviceOSDCount, - deviceArg, - }...) - - // assign the device class specific to the device - immediateExecuteArgs = a.appendDeviceClassArg(device, immediateExecuteArgs) - - // Reporting - immediateReportArgs := append(immediateExecuteArgs, []string{ - "--report", - }...) - - logger.Infof("Base command - %+v", baseCommand) - logger.Infof("immediateExecuteArgs - %+v", immediateExecuteArgs) - logger.Infof("immediateReportArgs - %+v", immediateReportArgs) - if err := context.Executor.ExecuteCommand(baseCommand, immediateReportArgs...); err != nil { - return errors.Wrap(err, "failed ceph-volume report") // fail return here as validation provided by ceph-volume - } - - // execute ceph-volume immediately with the device-specific setting instead of batching up multiple devices together - if err := context.Executor.ExecuteCommand(baseCommand, immediateExecuteArgs...); err != nil { - cvLog := readCVLogContent("/tmp/ceph-log/ceph-volume.log") - if cvLog != "" { - logger.Errorf("%s", cvLog) - } - - return errors.Wrap(err, "failed ceph-volume") - } - - } - } else { - logger.Infof("skipping device %s with osd %d already configured", name, device.Data) - } - } - - for md, conf := range metadataDevices { - - mdArgs := batchArgs - if _, ok := conf["osdsperdevice"]; ok { - mdArgs = append(mdArgs, []string{ - osdsPerDeviceFlag, - conf["osdsperdevice"], - }...) - } - if _, ok := conf["deviceclass"]; ok { - mdArgs = append(mdArgs, []string{ - crushDeviceClassFlag, - conf["deviceclass"], - }...) - } - if _, ok := conf["databasesizemb"]; ok { - mdArgs = append(mdArgs, []string{ - databaseSizeFlag, - conf["databasesizemb"], - }...) - } - mdArgs = append(mdArgs, strings.Split(conf["devices"], " ")...) - - // Do not change device names if udev persistent names are passed - mdPath := md - if !strings.HasPrefix(mdPath, "/dev") { - mdPath = path.Join("/dev", md) - } - - mdArgs = append(mdArgs, []string{ - dbDeviceFlag, - mdPath, - }...) - - // Reporting - reportArgs := append(mdArgs, []string{ - "--report", - }...) - - if err := context.Executor.ExecuteCommand(baseCommand, reportArgs...); err != nil { - return errors.Wrap(err, "failed ceph-volume report") // fail return here as validation provided by ceph-volume - } - - reportArgs = append(reportArgs, []string{ - "--format", - "json", - }...) - - cvOut, err := context.Executor.ExecuteCommandWithOutput(baseCommand, reportArgs...) 
- if err != nil { - return errors.Wrapf(err, "failed ceph-volume json report: %s", cvOut) // fail return here as validation provided by ceph-volume - } - - logger.Debugf("ceph-volume reports: %+v", cvOut) - - // ceph version v14.2.13 and v15.2.8 changed the changed output format of `lvm batch --prepare --report` - // use previous logic if ceph version does not fall into this range - if !isNewStyledLvmBatch(a.clusterInfo.CephVersion) { - var cvReport cephVolReport - if err = json.Unmarshal([]byte(cvOut), &cvReport); err != nil { - return errors.Wrap(err, "failed to unmarshal ceph-volume report json") - } - - if mdPath != cvReport.Vg.Devices { - return errors.Errorf("ceph-volume did not use the expected metadataDevice [%s]", mdPath) - } - } else { - var cvReports []cephVolReportV2 - if err = json.Unmarshal([]byte(cvOut), &cvReports); err != nil { - return errors.Wrap(err, "failed to unmarshal ceph-volume report json") - } - - if len(strings.Split(conf["devices"], " ")) != len(cvReports) { - return errors.Errorf("failed to create enough required devices, required: %s, actual: %v", cvOut, cvReports) - } - - for _, report := range cvReports { - if report.BlockDB != mdPath { - return errors.Errorf("wrong db device for %s, required: %s, actual: %s", report.Data, mdPath, report.BlockDB) - } - } - } - - // execute ceph-volume batching up multiple devices - if err := context.Executor.ExecuteCommand(baseCommand, mdArgs...); err != nil { - return errors.Wrap(err, "failed ceph-volume") // fail return here as validation provided by ceph-volume - } - } - - return nil -} - -func (a *OsdAgent) appendDeviceClassArg(device *DeviceOsdIDEntry, args []string) []string { - deviceClass := device.Config.DeviceClass - if deviceClass == "" { - // fall back to the device class for all devices on the node - deviceClass = a.storeConfig.DeviceClass - } - if deviceClass != "" { - args = append(args, []string{ - crushDeviceClassFlag, - deviceClass, - }...) - } - return args -} - -func lvmPreReq(context *clusterd.Context, pvcBacked, lvBackedPV bool) error { - // Check for the presence of LVM on the host when NOT running on PVC - // since this scenario is still using LVM - ne := NewNsenter(context, lvmCommandToCheck, []string{"--help"}) - err := ne.checkIfBinaryExistsOnHost() - if err != nil { - return errors.Wrapf(err, "binary %q does not exist on the host, make sure lvm2 package is installed", lvmCommandToCheck) - } - - // Update LVM configuration file - // Only do this after Ceph Nautilus 14.2.6 since it will use the ceph-volume raw mode by default and not LVM anymore - if err := UpdateLVMConfig(context, pvcBacked, lvBackedPV); err != nil { - return errors.Wrap(err, "failed to update lvm configuration file") - } - - return nil -} - -func getDatabaseSize(globalSize int, deviceSize int) int { - if deviceSize > 0 { - globalSize = deviceSize - } - return globalSize -} - -func sanitizeOSDsPerDevice(count int) string { - if count < 1 { - count = 1 - } - return strconv.Itoa(count) -} - -// GetCephVolumeLVMOSDs list OSD prepared with lvm mode -func GetCephVolumeLVMOSDs(context *clusterd.Context, clusterInfo *client.ClusterInfo, cephfsid, lv string, skipLVRelease, lvBackedPV bool) ([]oposd.OSDInfo, error) { - // lv can be a block device if raw mode is used - cvMode := "lvm" - - var lvPath string - args := []string{cvMode, "list", lv, "--format", "json"} - result, err := callCephVolume(context, false, args...) 
- if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve ceph-volume %s list results", cvMode) - } - - var osds []oposd.OSDInfo - var cephVolumeResult map[string][]osdInfo - err = json.Unmarshal([]byte(result), &cephVolumeResult) - if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal ceph-volume %s list results. %s", cvMode, result) - } - - for name, osdInfo := range cephVolumeResult { - id, err := strconv.Atoi(name) - if err != nil { - logger.Errorf("bad osd returned from ceph-volume %q", name) - continue - } - var osdFSID, osdDeviceClass string - for _, osd := range osdInfo { - if osd.Tags.ClusterFSID != cephfsid { - logger.Infof("skipping osd%d: %q running on a different ceph cluster %q", id, osd.Tags.OSDFSID, osd.Tags.ClusterFSID) - continue - } - osdFSID = osd.Tags.OSDFSID - osdDeviceClass = osd.Tags.CrushDeviceClass - - // If no lv is specified let's take the one we discovered - if lv == "" { - lvPath = osd.Path - } - - } - - if len(osdFSID) == 0 { - logger.Infof("Skipping osd%d as no instances are running on ceph cluster %q", id, cephfsid) - continue - } - logger.Infof("osdInfo has %d elements. %+v", len(osdInfo), osdInfo) - - // If lv was passed as an arg let's use it in osdInfo - if lv != "" { - lvPath = lv - } - - osd := oposd.OSDInfo{ - ID: id, - Cluster: "ceph", - UUID: osdFSID, - BlockPath: lvPath, - SkipLVRelease: skipLVRelease, - LVBackedPV: lvBackedPV, - CVMode: cvMode, - Store: "bluestore", - DeviceClass: osdDeviceClass, - } - osds = append(osds, osd) - } - logger.Infof("%d ceph-volume lvm osd devices configured on this node", len(osds)) - - return osds, nil -} - -func readCVLogContent(cvLogFilePath string) string { - // Open c-v log file - cvLogFile, err := os.Open(filepath.Clean(cvLogFilePath)) - if err != nil { - logger.Errorf("failed to open ceph-volume log file %q. %v", cvLogFilePath, err) - return "" - } - // #nosec G307 Calling defer to close the file without checking the error return is not a risk for a simple file open and close - defer cvLogFile.Close() - - // Read c-v log file - b, err := ioutil.ReadAll(cvLogFile) - if err != nil { - logger.Errorf("failed to read ceph-volume log file %q. %v", cvLogFilePath, err) - return "" - } - - return string(b) -} - -// GetCephVolumeRawOSDs list OSD prepared with raw mode -func GetCephVolumeRawOSDs(context *clusterd.Context, clusterInfo *client.ClusterInfo, cephfsid, block, metadataBlock, walBlock string, lvBackedPV, skipDeviceClass bool) ([]oposd.OSDInfo, error) { - // lv can be a block device if raw mode is used - cvMode := "raw" - - // Whether to fill the blockPath using the list result or the value that was passed in the function's call - var setDevicePathFromList bool - - // blockPath represents the path of the OSD block - // it can be the one passed from the function's call or discovered by the c-v list command - var blockPath string - - args := []string{cvMode, "list", block, "--format", "json"} - if block == "" { - setDevicePathFromList = true - args = []string{cvMode, "list", "--format", "json"} - } - - result, err := callCephVolume(context, false, args...) 
- if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve ceph-volume %s list results", cvMode) - } - - var osds []oposd.OSDInfo - var cephVolumeResult map[string]osdInfoBlock - err = json.Unmarshal([]byte(result), &cephVolumeResult) - if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal ceph-volume %s list results", cvMode) - } - - for _, osdInfo := range cephVolumeResult { - var osdFSID string - osdID := osdInfo.OsdID - - if osdInfo.CephFsid != cephfsid { - message := fmt.Sprintf("osd.%d: %q belonging to a different ceph cluster %q", osdID, osdInfo.OsdUUID, osdInfo.CephFsid) - // We must return an error since the caller only checks the length of osds - if isOnPVC { - return nil, errors.Errorf("%s", message) - } - logger.Infof("skipping %s", message) - continue - } - osdFSID = osdInfo.OsdUUID - - if len(osdFSID) == 0 { - message := fmt.Sprintf("no instance of osd.%d is running on ceph cluster %q (incomplete prepare? consider wiping the disks)", osdID, cephfsid) - if isOnPVC { - return nil, errors.Errorf("%s", message) - } - logger.Infof("skipping since %s", message) - continue - } - - // If no block is specified let's take the one we discovered - if setDevicePathFromList { - blockPath = osdInfo.Device - } else { - blockPath = block - } - - osd := oposd.OSDInfo{ - ID: osdID, - Cluster: "ceph", - UUID: osdFSID, - // let's not use osdInfo.Device, the device reported by bluestore tool since it might change during the next re-attach - // during the prepare sequence, the device is attached, then detached then re-attached - // During the last attach it could end up with a different /dev/ name - // Thus in the activation sequence we might activate the wrong OSD and have OSDInfo messed up - // Hence, let's use the PVC name instead which will always remain consistent - BlockPath: blockPath, - MetadataPath: metadataBlock, - WalPath: walBlock, - SkipLVRelease: true, - LVBackedPV: lvBackedPV, - CVMode: cvMode, - Store: "bluestore", - } - - if !skipDeviceClass { - diskInfo, err := clusterd.PopulateDeviceInfo(blockPath, context.Executor) - if err != nil { - return nil, errors.Wrapf(err, "failed to get device info for %q", blockPath) - } - osd.DeviceClass = sys.GetDiskDeviceClass(diskInfo) - logger.Infof("setting device class %q for device %q", osd.DeviceClass, diskInfo.Name) - } - - // If this is an encrypted OSD - if os.Getenv(oposd.CephVolumeEncryptedKeyEnvVarName) != "" { - // // Set subsystem and label for recovery and detection - // We use /mnt/ since LUKS label/subsystem must be applied on the main block device, not the resulting encrypted dm - mainBlock := fmt.Sprintf("/mnt/%s", os.Getenv(oposd.PVCNameEnvVarName)) - err = setLUKSLabelAndSubsystem(context, clusterInfo, mainBlock) - if err != nil { - return nil, errors.Wrapf(err, "failed to set subsystem and label to encrypted device %q for osd %d", mainBlock, osdID) - } - - // Close encrypted device - err = closeEncryptedDevice(context, block) - if err != nil { - return nil, errors.Wrapf(err, "failed to close encrypted device %q for osd %d", block, osdID) - } - - // If there is a metadata block - if metadataBlock != "" { - // Close encrypted device - err = closeEncryptedDevice(context, metadataBlock) - if err != nil { - return nil, errors.Wrapf(err, "failed to close encrypted db device %q for osd %d", metadataBlock, osdID) - } - } - - // If there is a wal block - if walBlock != "" { - // Close encrypted device - err = closeEncryptedDevice(context, walBlock) - if err != nil { - return nil, errors.Wrapf(err, "failed to 
close encrypted wal device %q for osd %d", walBlock, osdID) - } - } - } - - osds = append(osds, osd) - } - logger.Infof("%d ceph-volume raw osd devices configured on this node", len(osds)) - - return osds, nil -} - -func callCephVolume(context *clusterd.Context, requiresCombinedOutput bool, args ...string) (string, error) { - // Use stdbuf to capture the python output buffer such that we can write to the pod log as the - // logging happens instead of using the default buffering that will log everything after - // ceph-volume exits - baseCommand := "stdbuf" - - // Send the log to a temp location that isn't persisted to disk so that we can print out the - // failure log later without also printing out past failures - // TODO: does this mess up expectations from the ceph log collector daemon? - logPath := "/tmp/ceph-log" - if err := os.MkdirAll(logPath, 0700); err != nil { - return "", errors.Wrapf(err, "failed to create dir %q", logPath) - } - baseArgs := []string{"-oL", cephVolumeCmd, "--log-path", logPath} - - // Do not use combined output for "list" calls, otherwise we will get stderr is the output and this will break the json unmarshall - f := context.Executor.ExecuteCommandWithOutput - if requiresCombinedOutput { - // If the action is preparing we need the combined output - f = context.Executor.ExecuteCommandWithCombinedOutput - } - co, err := f(baseCommand, append(baseArgs, args...)...) - if err != nil { - // Print c-v log before exiting with failure - cvLog := readCVLogContent("/tmp/ceph-log/ceph-volume.log") - logger.Errorf("%s", co) - if cvLog != "" { - logger.Errorf("%s", cvLog) - } - - return "", errors.Wrapf(err, "failed ceph-volume call (see ceph-volume log above for more details)") - } - logger.Debugf("%v", co) - - return co, nil -} - -func appendOSDInfo(currentOSDs, osdsToAppend []oposd.OSDInfo) []oposd.OSDInfo { - for _, osdToAppend := range osdsToAppend { - if !isInOSDInfoList(osdToAppend.UUID, currentOSDs) { - currentOSDs = append(currentOSDs, osdToAppend) - } - } - return currentOSDs -} - -func isInOSDInfoList(uuid string, osds []oposd.OSDInfo) bool { - for _, osd := range osds { - if osd.UUID == uuid { - return true - } - } - - return false -} diff --git a/pkg/daemon/ceph/osd/volume_test.go b/pkg/daemon/ceph/osd/volume_test.go deleted file mode 100644 index 4e9506715..000000000 --- a/pkg/daemon/ceph/osd/volume_test.go +++ /dev/null @@ -1,1484 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/rook/rook/pkg/util/sys" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var initializeBlockPVCTestResult = ` - Volume group "ceph-bceae560-85b1-4a87-9375-6335fb760c8c" successfully created - Logical volume "osd-block-2ac8edb0-0d2e-4d8f-a6cc-4c972d56079c" created. -` - -var cephVolumeLVMTestResult = `{ - "0": [ - { - "devices": [ - "/dev/sdb" - ], - "lv_name": "osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "lv_path": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "lv_size": "<8.00g", - "lv_tags": "ceph.block_device=/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894,ceph.block_uuid=X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=4bfe8b72-5e69-4330-b6c0-4d914db8ab89,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=dbe407e0-c1cb-495e-b30a-02e01de6c8ae,ceph.osd_id=0,ceph.type=block,ceph.vdo=0", - "lv_uuid": "X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7", - "name": "osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "path": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "tags": { - "ceph.block_device": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "ceph.block_uuid": "X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7", - "ceph.cephx_lockbox_secret": "", - "ceph.cluster_fsid": "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", - "ceph.cluster_name": "ceph", - "ceph.crush_device_class": "None", - "ceph.encrypted": "0", - "ceph.osd_fsid": "dbe407e0-c1cb-495e-b30a-02e01de6c8ae", - "ceph.osd_id": "0", - "ceph.type": "block", - "ceph.vdo": "0" - }, - "type": "block", - "vg_name": "ceph-93550251-f76c-4219-a33f-df8805de7b9e" - } - ], - "1": [ - { - "devices": [ - "/dev/sdc" - ], - "lv_name": "osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "lv_path": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "lv_size": "<8.00g", - "lv_tags": "ceph.block_device=/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0,ceph.block_uuid=tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=4bfe8b72-5e69-4330-b6c0-4d914db8ab89,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=265d47ca-3e3c-4ef2-ac83-a44b7fb7feee,ceph.osd_id=1,ceph.type=block,ceph.vdo=0", - "lv_uuid": "tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk", - "name": "osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "path": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "tags": { - "ceph.block_device": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "ceph.block_uuid": "tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk", - "ceph.cephx_lockbox_secret": "", - "ceph.cluster_fsid": "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", - "ceph.cluster_name": "ceph", - 
"ceph.crush_device_class": "None", - "ceph.encrypted": "0", - "ceph.osd_fsid": "265d47ca-3e3c-4ef2-ac83-a44b7fb7feee", - "ceph.osd_id": "1", - "ceph.type": "block", - "ceph.vdo": "0" - }, - "type": "block", - "vg_name": "ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42" - } - ] -} -` - -var cephVolumeTestResultMultiCluster = `{ - "0": [ - { - "devices": [ - "/dev/sdb" - ], - "lv_name": "osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "lv_path": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "lv_size": "<8.00g", - "lv_tags": "ceph.block_device=/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894,ceph.block_uuid=X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=4bfe8b72-5e69-4330-b6c0-4d914db8ab89,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=dbe407e0-c1cb-495e-b30a-02e01de6c8ae,ceph.osd_id=0,ceph.type=block,ceph.vdo=0", - "lv_uuid": "X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7", - "name": "osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "path": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "tags": { - "ceph.block_device": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "ceph.block_uuid": "X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7", - "ceph.cephx_lockbox_secret": "", - "ceph.cluster_fsid": "451267e6-883f-4936-8dff-080d781c67d5", - "ceph.cluster_name": "ceph", - "ceph.crush_device_class": "None", - "ceph.encrypted": "0", - "ceph.osd_fsid": "dbe407e0-c1cb-495e-b30a-02e01de6c8ae", - "ceph.osd_id": "0", - "ceph.type": "block", - "ceph.vdo": "0" - }, - "type": "block", - "vg_name": "ceph-93550251-f76c-4219-a33f-df8805de7b9e" - }, - - { - "devices": [ - "/dev/sdc" - ], - "lv_name": "osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "lv_path": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "lv_size": "<8.00g", - "lv_tags": "ceph.block_device=/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0,ceph.block_uuid=tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=4bfe8b72-5e69-4330-b6c0-4d914db8ab89,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=265d47ca-3e3c-4ef2-ac83-a44b7fb7feee,ceph.osd_id=1,ceph.type=block,ceph.vdo=0", - "lv_uuid": "tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk", - "name": "osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "path": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "tags": { - "ceph.block_device": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "ceph.block_uuid": "tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk", - "ceph.cephx_lockbox_secret": "", - "ceph.cluster_fsid": "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", - "ceph.cluster_name": "ceph", - "ceph.crush_device_class": "None", - "ceph.encrypted": "0", - "ceph.osd_fsid": "265d47ca-3e3c-4ef2-ac83-a44b7fb7feee", - "ceph.osd_id": "1", - "ceph.type": "block", - "ceph.vdo": "0" - }, - "type": "block", - "vg_name": "ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42" - } - ] -} -` - -var cephVolumeTestResultMultiClusterMultiOSD = `{ - "0": [ - { - "devices": [ - "/dev/sdb" - ], - "lv_name": "osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "lv_path": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - 
"lv_size": "<8.00g", - "lv_tags": "ceph.block_device=/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894,ceph.block_uuid=X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=4bfe8b72-5e69-4330-b6c0-4d914db8ab89,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=dbe407e0-c1cb-495e-b30a-02e01de6c8ae,ceph.osd_id=0,ceph.type=block,ceph.vdo=0", - "lv_uuid": "X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7", - "name": "osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "path": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "tags": { - "ceph.block_device": "/dev/ceph-93550251-f76c-4219-a33f-df8805de7b9e/osd-data-d1cb42c3-60f6-4347-82eb-3188dc3df894", - "ceph.block_uuid": "X39Wps-Qewq-d8LV-kj2p-ZqC3-IFQn-C35sV7", - "ceph.cephx_lockbox_secret": "", - "ceph.cluster_fsid": "451267e6-883f-4936-8dff-080d781c67d5", - "ceph.cluster_name": "ceph", - "ceph.crush_device_class": "None", - "ceph.encrypted": "0", - "ceph.osd_fsid": "dbe407e0-c1cb-495e-b30a-02e01de6c8ae", - "ceph.osd_id": "0", - "ceph.type": "block", - "ceph.vdo": "0" - }, - "type": "block", - "vg_name": "ceph-93550251-f76c-4219-a33f-df8805de7b9e" - }, - - { - "devices": [ - "/dev/sdc" - ], - "lv_name": "osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "lv_path": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "lv_size": "<8.00g", - "lv_tags": "ceph.block_device=/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0,ceph.block_uuid=tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=4bfe8b72-5e69-4330-b6c0-4d914db8ab89,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=265d47ca-3e3c-4ef2-ac83-a44b7fb7feee,ceph.osd_id=1,ceph.type=block,ceph.vdo=0", - "lv_uuid": "tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk", - "name": "osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "path": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "tags": { - "ceph.block_device": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "ceph.block_uuid": "tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk", - "ceph.cephx_lockbox_secret": "", - "ceph.cluster_fsid": "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", - "ceph.cluster_name": "ceph", - "ceph.crush_device_class": "None", - "ceph.encrypted": "0", - "ceph.osd_fsid": "265d47ca-3e3c-4ef2-ac83-a44b7fb7feee", - "ceph.osd_id": "1", - "ceph.type": "block", - "ceph.vdo": "0" - }, - "type": "block", - "vg_name": "ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42" - } - ], - "1": [ - { - "devices": [ - "/dev/sdc" - ], - "lv_name": "osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "lv_path": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "lv_size": "<8.00g", - "lv_tags": "ceph.block_device=/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0,ceph.block_uuid=tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=4bfe8b72-5e69-4330-b6c0-4d914db8ab89,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=265d47ca-3e3c-4ef2-ac83-a44b7fb7feee,ceph.osd_id=1,ceph.type=block,ceph.vdo=0", - "lv_uuid": "tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk", - "name": "osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "path": 
"/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "tags": { - "ceph.block_device": "/dev/ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42/osd-data-5100eb6b-3a61-4fc1-80ee-86aec275b8b0", - "ceph.block_uuid": "tpdiTi-9Ozq-SrWi-p6od-LohX-s4U0-n2V0vk", - "ceph.cephx_lockbox_secret": "", - "ceph.cluster_fsid": "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", - "ceph.cluster_name": "ceph", - "ceph.crush_device_class": "None", - "ceph.encrypted": "0", - "ceph.osd_fsid": "265d47ca-3e3c-4ef2-ac83-a44b7fb7feee", - "ceph.osd_id": "1", - "ceph.type": "block", - "ceph.vdo": "0" - }, - "type": "block", - "vg_name": "ceph-dfb1ca03-eb4f-4a5f-84b4-f4734aaefd42" - } - ] -} -` - -var cephVolumeRAWTestResult = `{ - "0": { - "ceph_fsid": "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", - "device": "/dev/vdb", - "osd_id": 0, - "osd_uuid": "c03d7353-96e5-4a41-98de-830dfff97d06", - "type": "bluestore" - }, - "1": { - "ceph_fsid": "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", - "device": "/dev/vdc", - "osd_id": 1, - "osd_uuid": "62132914-e779-48cf-8f55-fbc9692c8ce5", - "type": "bluestore" - } -} -` - -var cephVolumeRawPartitionTestResult = `{ - "0": { - "ceph_fsid": "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", - "device": "/dev/vdb1", - "osd_id": 0, - "osd_uuid": "c03d7353-96e5-4a41-98de-830dfff97d06", - "type": "bluestore" - } -}` - -func createPVCAvailableDevices() *DeviceOsdMapping { - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": { - Data: -1, - Metadata: nil, - Config: DesiredDevice{ - Name: "/mnt/set1-data-0-rpf2k", - OSDsPerDevice: 1, - MetadataDevice: "", - DatabaseSizeMB: 0, - DeviceClass: "", - IsFilter: false, - IsDevicePathFilter: false, - }, - PersistentDevicePaths: []string{ - "/dev/rook-vg/rook-lv1", - "/dev/mapper/rook--vg-rook--lv1", - "/dev/disk/by-id/dm-name-rook--vg-rook--lv1", - "/dev/disk/by-id/dm-uuid-LVM-4BOeIsrVP5O2J36cVqMSJNLEcwGIrqSF12RyWdpUaiCuAqOa1hAmD6EUYTO6dyD1", - }, - }, - }, - } - - return devices -} - -func TestConfigureCVDevices(t *testing.T) { - originalLVMConfPath := lvmConfPath - lvmConfPathTemp, err := ioutil.TempFile("", "lvmconf") - if err != nil { - t.Fatal(err) - } - lvmConfPath = lvmConfPathTemp.Name() - defer func() { - os.Remove(lvmConfPath) - lvmConfPath = originalLVMConfPath - }() - - originalCephConfigDir := cephConfigDir - cephConfigDir, err = ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer func() { - os.RemoveAll(cephConfigDir) - cephConfigDir = originalCephConfigDir - }() - - nodeName := "set1-data-0-rpf2k" - mountedDev := "/mnt/" + nodeName - mapperDev := "/dev/mapper/rook--vg-rook--lv1" - clusterFSID := "4bfe8b72-5e69-4330-b6c0-4d914db8ab89" - osdUUID := "c03d7353-96e5-4a41-98de-830dfff97d06" - lvBlockPath := "/dev/rook-vg/rook-lv1" - - // Test case for creating new raw mode OSD on LV-backed PVC - { - t.Log("Test case for creating new raw mode OSD on LV-backed PVC") - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("[MockExecuteCommandWithOutput] %s %v", command, args) - if command == "lsblk" && args[0] == mountedDev { - return fmt.Sprintf(`SIZE="17179869184" ROTA="1" RO="0" TYPE="lvm" PKNAME="" NAME="%s" KNAME="/dev/dm-1, a ...interface{})`, mapperDev), nil - } - if command == "sgdisk" { - return "Disk identifier (GUID): 18484D7E-5287-4CE9-AC73-D02FB69055CE", nil - } - if contains(args, "lvm") && contains(args, "list") { - return `{}`, nil - } - if args[0] == "auth" && args[1] == 
"get-or-create-key" { - return "{\"key\":\"mysecurekey\"}", nil - } - if contains(args, "raw") && contains(args, "list") { - return fmt.Sprintf(`{ - "0": { - "ceph_fsid": "%s", - "device": "%s", - "osd_id": 0, - "osd_uuid": "%s", - "type": "bluestore" - } - } - `, clusterFSID, mountedDev, osdUUID), nil - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("[MockExecuteCommandWithCombinedOutput] %s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[6] == mapperDev { - return "", nil - } - if contains(args, "lvm") && contains(args, "list") { - return `{}`, nil - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor, ConfigDir: cephConfigDir} - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}, - FSID: clusterFSID, - } - agent := &OsdAgent{clusterInfo: clusterInfo, nodeName: nodeName, pvcBacked: true, storeConfig: config.StoreConfig{DeviceClass: "myds"}} - devices := createPVCAvailableDevices() - deviceOSDs, err := agent.configureCVDevices(context, devices) - assert.Nil(t, err) - assert.Len(t, deviceOSDs, 1) - deviceOSD := deviceOSDs[0] - logger.Infof("deviceOSDs: %+v", deviceOSDs) - assert.Equal(t, osdUUID, deviceOSD.UUID) - assert.Equal(t, mountedDev, deviceOSD.BlockPath) - assert.Equal(t, true, deviceOSD.SkipLVRelease) - assert.Equal(t, true, deviceOSD.LVBackedPV) - assert.Equal(t, "raw", deviceOSD.CVMode) - assert.Equal(t, "bluestore", deviceOSD.Store) - } - - { - // Test case for tending to create new lvm mode OSD on LV-backed PVC, but it catches an error - t.Log("Test case for tending to create new lvm mode OSD on LV-backed PVC, but it catches an error") - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("[MockExecuteCommandWithOutput] %s %v", command, args) - if command == "lsblk" && args[0] == mountedDev { - return fmt.Sprintf(`SIZE="17179869184" ROTA="1" RO="0" TYPE="lvm" PKNAME="" NAME="%s" KNAME="/dev/dm-1, a ...interface{})`, mapperDev), nil - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - if command == "nsenter" { - return "", nil - } - logger.Infof("[MockExecuteCommandWithCombinedOutput] %s %v", command, args) - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor, ConfigDir: cephConfigDir} - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 7}, // It doesn't support raw mode OSD - FSID: clusterFSID, - } - agent := &OsdAgent{clusterInfo: clusterInfo, nodeName: nodeName, pvcBacked: true} - devices := createPVCAvailableDevices() - - _, err := agent.configureCVDevices(context, devices) - - assert.EqualError(t, err, "failed to initialize devices on PVC: OSD on LV-backed PVC requires new Ceph to use raw mode") - } - - { - // Test case for with no available lvm mode OSD and existing raw mode OSD on LV-backed PVC, it should return info of raw mode OSD - t.Log("Test case for with no available lvm mode OSD and existing raw mode OSD on LV-backed PVC, it should return info of raw mode OSD") - executor 
:= &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("[MockExecuteCommandWithOutput] %s %v", command, args) - if command == "lsblk" && args[0] == mountedDev { - return fmt.Sprintf(`SIZE="17179869184" ROTA="1" RO="0" TYPE="lvm" PKNAME="" NAME="%s" KNAME="/dev/dm-1, a ...interface{})`, mapperDev), nil - } - if command == "sgdisk" { - return "Disk identifier (GUID): 18484D7E-5287-4CE9-AC73-D02FB69055CE", nil - } - if args[1] == "ceph-volume" && args[4] == "lvm" && args[5] == "list" && args[6] == mapperDev { - return `{}`, nil - } - if args[1] == "ceph-volume" && args[4] == "raw" && args[5] == "list" && args[6] == mountedDev { - return fmt.Sprintf(`{ - "0": { - "ceph_fsid": "%s", - "device": "%s", - "osd_id": 0, - "osd_uuid": "%s", - "type": "bluestore" - } - } - `, clusterFSID, mountedDev, osdUUID), nil - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor, ConfigDir: cephConfigDir} - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}, // It supports raw mode OSD - FSID: clusterFSID, - } - agent := &OsdAgent{clusterInfo: clusterInfo, nodeName: nodeName, pvcBacked: true} - devices := &DeviceOsdMapping{Entries: map[string]*DeviceOsdIDEntry{}} - - deviceOSDs, err := agent.configureCVDevices(context, devices) - - assert.Nil(t, err) - assert.Len(t, deviceOSDs, 1) - deviceOSD := deviceOSDs[0] - logger.Infof("deviceOSDs: %+v", deviceOSDs) - assert.Equal(t, osdUUID, deviceOSD.UUID) - assert.Equal(t, mountedDev, deviceOSD.BlockPath) - assert.Equal(t, true, deviceOSD.SkipLVRelease) - assert.Equal(t, true, deviceOSD.LVBackedPV) - assert.Equal(t, "raw", deviceOSD.CVMode) - assert.Equal(t, "bluestore", deviceOSD.Store) - } - - { - // Test case for a lvm mode OSD on LV-backed PVC, it should return info of lvm mode OSD - t.Log("Test case for a lvm mode OSD on LV-backed PVC, it should return info of lvm mode OSD") - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("[MockExecuteCommandWithOutput] %s %v", command, args) - if command == "lsblk" && args[0] == mountedDev { - return fmt.Sprintf(`SIZE="17179869184" ROTA="1" RO="0" TYPE="lvm" PKNAME="" NAME="%s" KNAME="/dev/dm-1" - `, mapperDev), nil - } - if args[1] == "ceph-volume" && args[4] == "lvm" && args[5] == "list" { - return fmt.Sprintf(`{ - "0": [ - { - "devices": [ - "/dev/sdb" - ], - "lv_name": "lv1", - "lv_path": "%[1]s", - "lv_size": "6.00g", - "lv_tags": "ceph.block_device=%[1]s,ceph.block_uuid=hO8Hua-3H6B-qEt0-0NNN-ykFF-lsos-rSlmt2,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=%[2]s,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=%[3]s,ceph.osd_id=0,ceph.osdspec_affinity=,ceph.type=block,ceph.vdo=0", - "lv_uuid": "hO8Hua-3H6B-qEt0-0NNN-ykFF-lsos-rSlmt2", - "name": "lv1", - "path": "%[1]s", - "tags": { - "ceph.block_device": "%[1]s", - "ceph.block_uuid": "hO8Hua-3H6B-qEt0-0NNN-ykFF-lsos-rSlmt2", - "ceph.cephx_lockbox_secret": "", - "ceph.cluster_fsid": "%[2]s", - "ceph.cluster_name": "ceph", - "ceph.crush_device_class": "None", - "ceph.encrypted": "0", - "ceph.osd_fsid": "%[3]s", - "ceph.osd_id": "0", - "ceph.osdspec_affinity": "", - 
"ceph.type": "block", - "ceph.vdo": "0" - }, - "type": "block", - "vg_name": "test-vg" - } - ] - } - `, lvBlockPath, clusterFSID, osdUUID), nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor, ConfigDir: cephConfigDir} - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}, // It supports raw mode OSD - FSID: clusterFSID, - } - agent := &OsdAgent{clusterInfo: clusterInfo, nodeName: nodeName, pvcBacked: true} - devices := &DeviceOsdMapping{Entries: map[string]*DeviceOsdIDEntry{}} - - deviceOSDs, err := agent.configureCVDevices(context, devices) - - assert.Nil(t, err) - assert.Len(t, deviceOSDs, 1) - deviceOSD := deviceOSDs[0] - logger.Infof("deviceOSDs: %+v", deviceOSDs) - assert.Equal(t, osdUUID, deviceOSD.UUID) - assert.Equal(t, lvBlockPath, deviceOSD.BlockPath) - assert.Equal(t, true, deviceOSD.SkipLVRelease) - assert.Equal(t, true, deviceOSD.LVBackedPV) - assert.Equal(t, "lvm", deviceOSD.CVMode) - assert.Equal(t, "bluestore", deviceOSD.Store) - } - - { - t.Log("Test case for raw mode on partition") - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("[MockExecuteCommandWithOutput] %s %v", command, args) - // get lsblk for disks from cephVolumeRAWTestResult var - if command == "lsblk" && (args[0] == "/dev/vdb1") { - return fmt.Sprintf(`SIZE="17179869184" ROTA="1" RO="0" TYPE="part" PKNAME="" NAME="%s" KNAME="%s"`, args[0], args[0]), nil - } - if args[1] == "ceph-volume" && args[4] == "raw" && args[5] == "list" { - return cephVolumeRawPartitionTestResult, nil - } - if args[1] == "ceph-volume" && args[4] == "lvm" && args[5] == "list" { - return `{}`, nil - } - if command == "sgdisk" { - return "Disk identifier (GUID): 18484D7E-5287-4CE9-AC73-D02FB69055CE", nil - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - deviceClassSet := false - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("[MockExecuteCommandWithCombinedOutput] %s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] == "--crush-device-class" { - assert.Equal(t, "myclass", args[8]) - deviceClassSet = true - return "", nil - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor, ConfigDir: cephConfigDir} - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 1}, // It supports raw mode OSD - FSID: clusterFSID, - } - agent := &OsdAgent{clusterInfo: clusterInfo, nodeName: nodeName, storeConfig: config.StoreConfig{DeviceClass: "myclass"}} - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "vdb1": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/vdb1"}, DeviceInfo: &sys.LocalDisk{Type: sys.PartType}}, - }, - } - _, err := agent.configureCVDevices(context, devices) - assert.Nil(t, err) - assert.True(t, deviceClassSet) - } - - // disabled while raw mode is disabled for disks - // { - // // Test case for a raw mode OSD - // t.Log("Test case for a raw mode OSD") - // executor := &exectest.MockExecutor{} - // executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - // logger.Infof("[MockExecuteCommandWithOutput] %s %v", command, args) - // // get 
lsblk for disks from cephVolumeRAWTestResult var - // if command == "lsblk" && (args[0] == "/dev/vdb" || args[0] == "/dev/vdc") { - // return fmt.Sprintf(`SIZE="17179869184" ROTA="1" RO="0" TYPE="disk" PKNAME="" NAME="%s" KNAME="%s"`, args[0], args[0]), nil - // } - // if args[1] == "ceph-volume" && args[4] == "raw" && args[5] == "list" { - // return cephVolumeRAWTestResult, nil - // } - // if args[1] == "ceph-volume" && args[4] == "lvm" && args[5] == "list" { - // return `{}`, nil - // } - // if command == "sgdisk" { - // return "Disk identifier (GUID): 18484D7E-5287-4CE9-AC73-D02FB69055CE", nil - // } - // return "", errors.Errorf("unknown command %s %s", command, args) - // } - // deviceClassSet := false - // executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - // logger.Infof("[MockExecuteCommandWithCombinedOutput] %s %v", command, args) - // if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] == "--crush-device-class" { - // assert.Equal(t, "myclass", args[8]) - // deviceClassSet = true - // return "", nil - // } - // return "", errors.Errorf("unknown command %s %s", command, args) - // } - - // context := &clusterd.Context{Executor: executor, ConfigDir: cephConfigDir} - // clusterInfo := &cephclient.ClusterInfo{ - // CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 1}, // It supports raw mode OSD - // FSID: clusterFSID, - // } - // agent := &OsdAgent{clusterInfo: clusterInfo, nodeName: nodeName, storeConfig: config.StoreConfig{DeviceClass: "myclass"}} - // devices := &DeviceOsdMapping{ - // Entries: map[string]*DeviceOsdIDEntry{ - // "vdb": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/vdb"}}, - // }, - // } - // _, err := agent.configureCVDevices(context, devices) - // assert.Nil(t, err) - // assert.True(t, deviceClassSet) - // } -} - -func testBaseArgs(args []string) error { - if args[1] == "ceph-volume" && args[2] == "--log-path" && args[3] == "/tmp/ceph-log" && args[4] == "lvm" && args[5] == "batch" && args[6] == "--prepare" && args[7] == "--bluestore" && args[8] == "--yes" { - return nil - } - - return errors.Errorf("unknown args %s ", args) -} - -func TestInitializeBlock(t *testing.T) { - // Common vars for all the tests - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda"}}, - }, - } - - // Test default behavior - { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommand = func(command string, args ...string) error { - logger.Infof("%s %v", command, args) - - // Validate base common args - err := testBaseArgs(args) - if err != nil { - return err - } - - // First command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" { - return nil - } - - // Second command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--report" { - return nil - } - - return errors.Errorf("unknown command %s %s", command, args) - } - a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}}, nodeName: "node1"} - context := &clusterd.Context{Executor: executor} - - err := a.initializeDevicesLVMMode(context, devices) - assert.NoError(t, err, "failed default behavior test") - logger.Info("success, go to next test") - } - - // Test encryption behavior - { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommand = 
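
For readers tracing the index checks in `testBaseArgs` and the cases around it: the mocked executor receives `stdbuf` as the command, so indices [1]–[8] of the argument slice are the fixed `ceph-volume lvm batch` prefix and [9]+ vary per test case. A purely illustrative reconstruction of that argument vector, assembled from the assertions above rather than from running the tool:

```go
package main

import "fmt"

func main() {
	// Argument slice as the mocked executor sees it for the default-behavior case;
	// the command itself is "stdbuf".
	args := []string{
		"-oL",                         // [0] line-buffer stdout
		"ceph-volume",                 // [1]
		"--log-path", "/tmp/ceph-log", // [2] [3]
		"lvm", "batch",                // [4] [5]
		"--prepare", "--bluestore", "--yes",  // [6] [7] [8] validated by testBaseArgs
		"--osds-per-device", "1", "/dev/sda", // [9] [10] [11] per-case flags
	}
	fmt.Println(args)
}
```
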
func(command string, args ...string) error { - logger.Infof("%s %v", command, args) - - // Validate base common args - err := testBaseArgs(args) - if err != nil { - return err - } - - // First command - if args[9] == "--dmcrypt" && args[10] == "--osds-per-device" && args[11] == "1" && args[12] == "/dev/sda" { - return nil - } - - // Second command - if args[9] == "--dmcrypt" && args[10] == "--osds-per-device" && args[11] == "1" && args[12] == "/dev/sda" && args[13] == "--report" { - return nil - } - - return errors.Errorf("unknown command %s %s", command, args) - } - a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}}, nodeName: "node1", storeConfig: config.StoreConfig{EncryptedDevice: true}} - context := &clusterd.Context{Executor: executor} - - err := a.initializeDevicesLVMMode(context, devices) - assert.NoError(t, err, "failed encryption test") - logger.Info("success, go to next test") - } - - // Test multiple OSD per device - { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommand = func(command string, args ...string) error { - logger.Infof("%s %v", command, args) - - // Validate base common args - err := testBaseArgs(args) - if err != nil { - return err - } - - // First command - if args[9] == "--osds-per-device" && args[10] == "3" && args[11] == "/dev/sda" { - return nil - } - - // Second command - if args[9] == "--osds-per-device" && args[10] == "3" && args[11] == "/dev/sda" && args[12] == "--report" { - return nil - } - - return errors.Errorf("unknown command %s %s", command, args) - } - a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}}, nodeName: "node1", storeConfig: config.StoreConfig{OSDsPerDevice: 3}} - context := &clusterd.Context{Executor: executor} - - err := a.initializeDevicesLVMMode(context, devices) - assert.NoError(t, err, "failed multiple osd test") - logger.Info("success, go to next test") - } - - // Test crush device class - { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommand = func(command string, args ...string) error { - logger.Infof("%s %v", command, args) - - // Validate base common args - err := testBaseArgs(args) - if err != nil { - return err - } - - // First command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--crush-device-class" && args[13] == "hybrid" { - return nil - } - - // Second command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--crush-device-class" && args[13] == "hybrid" && args[14] == "--report" { - return nil - } - - return errors.Errorf("unknown command %s %s", command, args) - } - a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}}, nodeName: "node1", storeConfig: config.StoreConfig{DeviceClass: "hybrid"}} - context := &clusterd.Context{Executor: executor} - err := a.initializeDevicesLVMMode(context, devices) - assert.NoError(t, err, "failed crush device class test") - logger.Info("success, go to next test") - } - - // Test with metadata devices - { - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "sdb"}}, - }, - } - - executor := &exectest.MockExecutor{} - executor.MockExecuteCommand = func(command string, args ...string) error { - logger.Infof("%s %v", command, args) - - // Validate base common 
args - err := testBaseArgs(args) - if err != nil { - return err - } - - // First command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/sdb" { - return nil - } - - // Second command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/sdb" && args[14] == "--report" { - return nil - } - - return errors.Errorf("unknown command %s %s", command, args) - } - - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - - // Validate base common args - err := testBaseArgs(args) - if err != nil { - return "", err - } - - // First command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/sdb" { - return `{"vg": {"devices": "/dev/sdb"}}`, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}}, nodeName: "node1"} - context := &clusterd.Context{Executor: executor} - - err := a.initializeDevicesLVMMode(context, devices) - assert.NoError(t, err, "failed metadata test") - logger.Info("success, go to next test") - } - - // Test with metadata devices with dev by-id - { - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_1TB_XXX"}}, - }, - } - - executor := &exectest.MockExecutor{} - executor.MockExecuteCommand = func(command string, args ...string) error { - logger.Infof("%s %v", command, args) - - // Validate base common args - err := testBaseArgs(args) - if err != nil { - return err - } - - // First command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_1TB_XXX" { - return nil - } - - // Second command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_1TB_XXX" && args[14] == "--report" { - return nil - } - - return errors.Errorf("unknown command %s %s", command, args) - } - - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - - // Validate base common args - err := testBaseArgs(args) - if err != nil { - return "", err - } - - // First command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_1TB_XXX" { - return `{"vg": {"devices": "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_1TB_XXX"}}`, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}}, nodeName: "node1"} - context := &clusterd.Context{Executor: executor} - - err := a.initializeDevicesLVMMode(context, devices) - assert.NoError(t, err, "failed metadata device by-id test") - logger.Info("success, go to next test") - } -} - -func TestInitializeBlockPVC(t *testing.T) { - executor := &exectest.MockExecutor{} - 
executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" { - return initializeBlockPVCTestResult, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - - // Test with CephVersion{Major: 14, Minor: 2, Extra: 8} for argument raw without flag --crush-device-class. - context := &clusterd.Context{Executor: executor} - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}, - } - a := &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - }, - } - - blockPath, metadataBlockPath, walBlockPath, err := a.initializeBlockPVC(context, devices, false) - assert.Nil(t, err) - assert.Equal(t, "/mnt/set1-data-0-rpf2k", blockPath) - assert.Equal(t, "", metadataBlockPath) - assert.Equal(t, "", walBlockPath) - - // Test for failure scenario by giving CephVersion{Major: 14, Minor: 2, Extra: 7} - // instead of CephVersion{Major: 14, Minor: 2, Extra: 8}. - clusterInfo = &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 7}, - } - a = &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - devices = &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - }, - } - - _, _, _, err = a.initializeBlockPVC(context, devices, false) - assert.NotNil(t, err) - - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "lvm" && args[3] == "prepare" && args[4] == "--bluestore" { - return initializeBlockPVCTestResult, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - - // Test with CephVersion{Major: 14, Minor: 2, Extra: 7} for argument lvm without flag --crush-device-class. - clusterInfo = &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 7}, - } - a = &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - devices = &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - }, - } - - blockPath, metadataBlockPath, walBlockPath, err = a.initializeBlockPVC(context, devices, false) - assert.Nil(t, err) - assert.Equal(t, "/dev/ceph-bceae560-85b1-4a87-9375-6335fb760c8c/osd-block-2ac8edb0-0d2e-4d8f-a6cc-4c972d56079c", blockPath) - assert.Equal(t, "", metadataBlockPath) - assert.Equal(t, "", walBlockPath) - - // Test for failure scenario by giving CephVersion{Major: 14, Minor: 2, Extra: 8} - // instead of cephver.CephVersion{Major: 14, Minor: 2, Extra: 7}. - clusterInfo = &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}, - } - a = &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - devices = &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - }, - } - - _, _, _, err = a.initializeBlockPVC(context, devices, false) - assert.NotNil(t, err) - - // Test for OSD on LV-backed PVC where Ceph does not support raw mode. 
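
The PVC cases above and below pivot on the Ceph version: 14.2.8 selects `ceph-volume raw prepare` and returns the `/mnt/<pvc>` device as the block path, while 14.2.7 falls back to `lvm prepare` and returns the logical-volume path. A hedged sketch of that gate, with the threshold inferred from these test cases; `cephVersion` and `supportsRawModeOnPVC` are local illustrative names, not the package's version helpers:

```go
package main

import "fmt"

type cephVersion struct{ Major, Minor, Extra int }

// supportsRawModeOnPVC reflects the cutoff exercised by the tests: raw-mode OSDs
// on PVC are used from Ceph 14.2.8 onward (inferred; illustrative only).
func supportsRawModeOnPVC(v cephVersion) bool {
	if v.Major != 14 {
		return v.Major > 14
	}
	if v.Minor != 2 {
		return v.Minor > 2
	}
	return v.Extra >= 8
}

func main() {
	fmt.Println(supportsRawModeOnPVC(cephVersion{14, 2, 7})) // false -> lvm prepare
	fmt.Println(supportsRawModeOnPVC(cephVersion{14, 2, 8})) // true  -> raw prepare
}
```
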
- // Expect no commands to be used. - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - return "", errors.Errorf("unknown command %s %s", command, args) - } - // Test with CephVersion{Major: 14, Minor: 2, Extra: 7} - clusterInfo = &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 7}, - } - a = &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - devices = &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - }, - } - _, _, _, err = a.initializeBlockPVC(context, devices, true) - assert.NotNil(t, err) - logger.Infof("error message %v", err) - - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] == "--crush-device-class" { - assert.Equal(t, "foo", args[8]) - return initializeBlockPVCTestResult, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - // Test with CephVersion{Major: 14, Minor: 2, Extra: 8} for argument raw with flag --crush-device-class. - os.Setenv(oposd.CrushDeviceClassVarName, "foo") - defer os.Unsetenv(oposd.CrushDeviceClassVarName) - clusterInfo = &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}, - } - a = &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - devices = &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - }, - } - - blockPath, metadataBlockPath, walBlockPath, err = a.initializeBlockPVC(context, devices, false) - assert.Nil(t, err) - assert.Equal(t, "/mnt/set1-data-0-rpf2k", blockPath) - assert.Equal(t, "", metadataBlockPath) - assert.Equal(t, "", walBlockPath) - - // Test for condition when Data !=-1 with CephVersion{Major: 14, Minor: 2, Extra: 8} for raw with flag --crush-device-class. - devices = &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: 0, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - }, - } - - blockPath, metadataBlockPath, walBlockPath, err = a.initializeBlockPVC(context, devices, false) - assert.Nil(t, err) - assert.Equal(t, "", blockPath) - assert.Equal(t, "", metadataBlockPath) - assert.Equal(t, "", walBlockPath) -} - -func TestInitializeBlockPVCWithMetadata(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] == "--block.db" { - return initializeBlockPVCTestResult, nil - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - - // Test with CephVersion{Major: 14, Minor: 2, Extra: 8} for argument raw with flag --block.db and without --crush-device-class flag. 
- context := &clusterd.Context{Executor: executor} - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}, - } - a := &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - "metadata": {Data: 0, Metadata: []int{1}, Config: DesiredDevice{Name: "/srv/set1-metadata-0-8c7kr"}}, - "wal": {Data: 1, Metadata: []int{2}, Config: DesiredDevice{Name: ""}}, - }, - } - - blockPath, metadataBlockPath, walBlockPath, err := a.initializeBlockPVC(context, devices, false) - assert.Nil(t, err) - assert.Equal(t, "/mnt/set1-data-0-rpf2k", blockPath) - assert.Equal(t, "/srv/set1-metadata-0-8c7kr", metadataBlockPath) - assert.Equal(t, "", walBlockPath) - - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "lvm" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] == "--block.db" { - return initializeBlockPVCTestResult, nil - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - - // Test with CephVersion{Major: 14, Minor: 2, Extra: 7} for argument lvm with flag --block.db and without --crush-device-class flag. - clusterInfo = &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 7}, - } - a = &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - - devices = &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - "metadata": {Data: 0, Metadata: []int{1}, Config: DesiredDevice{Name: "/srv/set1-metadata-0-8c7kr"}}, - "wal": {Data: 1, Metadata: []int{2}, Config: DesiredDevice{Name: ""}}, - }, - } - - blockPath, metadataBlockPath, walBlockPath, err = a.initializeBlockPVC(context, devices, false) - assert.Nil(t, err) - assert.Equal(t, "/dev/ceph-bceae560-85b1-4a87-9375-6335fb760c8c/osd-block-2ac8edb0-0d2e-4d8f-a6cc-4c972d56079c", blockPath) - assert.Equal(t, "/srv/set1-metadata-0-8c7kr", metadataBlockPath) - assert.Equal(t, "", walBlockPath) - - executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] == "--crush-device-class" && args[9] == "--block.db" { - return initializeBlockPVCTestResult, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - - // Test with CephVersion{Major: 14, Minor: 2, Extra: 8} for argument raw with flag --block.db and --crush-device-class flag. 
- os.Setenv(oposd.CrushDeviceClassVarName, "foo") - defer os.Unsetenv(oposd.CrushDeviceClassVarName) - context = &clusterd.Context{Executor: executor} - clusterInfo = &cephclient.ClusterInfo{ - CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}, - } - a = &OsdAgent{clusterInfo: clusterInfo, nodeName: "node1"} - - devices = &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "data": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/mnt/set1-data-0-rpf2k"}}, - "metadata": {Data: 0, Metadata: []int{1}, Config: DesiredDevice{Name: "/srv/set1-metadata-0-8c7kr"}}, - "wal": {Data: 1, Metadata: []int{2}, Config: DesiredDevice{Name: ""}}, - }, - } - - blockPath, metadataBlockPath, walBlockPath, err = a.initializeBlockPVC(context, devices, false) - assert.Nil(t, err) - assert.Equal(t, "/mnt/set1-data-0-rpf2k", blockPath) - assert.Equal(t, "/srv/set1-metadata-0-8c7kr", metadataBlockPath) - assert.Equal(t, "", walBlockPath) -} - -func TestParseCephVolumeLVMResult(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - - logger.Infof("%s %v", command, args) - if command == "stdbuf" { - if args[4] == "lvm" && args[5] == "list" { - return cephVolumeLVMTestResult, nil - } - } - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor} - osds, err := GetCephVolumeLVMOSDs(context, &cephclient.ClusterInfo{Namespace: "name"}, "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", "", false, false) - assert.Nil(t, err) - require.NotNil(t, osds) - assert.Equal(t, 2, len(osds)) -} - -func TestParseCephVolumeRawResult(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - if command == "stdbuf" { - if args[4] == "raw" && args[5] == "list" { - return cephVolumeRAWTestResult, nil - } - } - - // get lsblk for disks from cephVolumeRAWTestResult var - if command == "lsblk" && (args[0] == "/dev/vdb" || args[0] == "/dev/vdc") { - return fmt.Sprintf(`SIZE="17179869184" ROTA="1" RO="0" TYPE="disk" PKNAME="" NAME="%s" KNAME="%s"`, args[0], args[0]), nil - } - if command == "sgdisk" { - return "Disk identifier (GUID): 18484D7E-5287-4CE9-AC73-D02FB69055CE", nil - } - return "", errors.Errorf("unknown command: %s, args: %#v", command, args) - } - clusterInfo := &cephclient.ClusterInfo{Namespace: "name"} - - context := &clusterd.Context{Executor: executor, Clientset: test.New(t, 3)} - osds, err := GetCephVolumeRawOSDs(context, clusterInfo, "4bfe8b72-5e69-4330-b6c0-4d914db8ab89", "", "", "", false, false) - assert.Nil(t, err) - require.NotNil(t, osds) - assert.Equal(t, 2, len(osds)) -} - -func TestCephVolumeResultMultiClusterSingleOSD(t *testing.T) { - executor := &exectest.MockExecutor{} - // set up a mock function to return "rook owned" partitions on the device and it does not have a filesystem - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - - if command == "stdbuf" { - if args[4] == "lvm" && args[5] == "list" { - return cephVolumeTestResultMultiCluster, nil - } - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - - context := &clusterd.Context{Executor: executor} - osds, err := GetCephVolumeLVMOSDs(context, &cephclient.ClusterInfo{Namespace: "name"}, 
"451267e6-883f-4936-8dff-080d781c67d5", "", false, false) - - assert.Nil(t, err) - require.NotNil(t, osds) - assert.Equal(t, 1, len(osds)) - assert.Equal(t, osds[0].UUID, "dbe407e0-c1cb-495e-b30a-02e01de6c8ae") -} - -func TestCephVolumeResultMultiClusterMultiOSD(t *testing.T) { - executor := &exectest.MockExecutor{} - // set up a mock function to return "rook owned" partitions on the device and it does not have a filesystem - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("%s %v", command, args) - - if command == "stdbuf" { - if args[4] == "lvm" && args[5] == "list" { - return cephVolumeTestResultMultiClusterMultiOSD, nil - } - } - - return "", errors.Errorf("unknown command %s% s", command, args) - } - - context := &clusterd.Context{Executor: executor} - osds, err := GetCephVolumeLVMOSDs(context, &cephclient.ClusterInfo{Namespace: "name"}, "451267e6-883f-4936-8dff-080d781c67d5", "", false, false) - assert.Nil(t, err) - require.NotNil(t, osds) - assert.Equal(t, 1, len(osds)) - assert.Equal(t, osds[0].UUID, "dbe407e0-c1cb-495e-b30a-02e01de6c8ae") -} - -func TestSanitizeOSDsPerDevice(t *testing.T) { - assert.Equal(t, "1", sanitizeOSDsPerDevice(-1)) - assert.Equal(t, "1", sanitizeOSDsPerDevice(0)) - assert.Equal(t, "1", sanitizeOSDsPerDevice(1)) - assert.Equal(t, "2", sanitizeOSDsPerDevice(2)) -} - -func TestGetDatabaseSize(t *testing.T) { - assert.Equal(t, 0, getDatabaseSize(0, 0)) - assert.Equal(t, 2048, getDatabaseSize(4096, 2048)) -} - -func TestPrintCVLogContent(t *testing.T) { - tmp, err := ioutil.TempFile("", "cv-log") - assert.Nil(t, err) - - defer os.Remove(tmp.Name()) - - nodeName := "set1-2-data-jmxdx" - cvLogDir = path.Join(tmp.Name(), nodeName) - assert.Equal(t, path.Join(tmp.Name(), nodeName), cvLogDir) - - cvLogFilePath := path.Join(cvLogDir, "ceph-volume.log") - assert.Equal(t, path.Join(cvLogDir, "ceph-volume.log"), cvLogFilePath) - - // Print c-v log, it is empty so this is similating a failure (e,g: the file does not exist) - cvLog := readCVLogContent(tmp.Name()) - assert.Empty(t, cvLog, cvLog) - - // Write content in the file - cvDummyLog := []byte(`dummy log`) - _, err = tmp.Write(cvDummyLog) - assert.NoError(t, err) - // Print again, now there is content - cvLog = readCVLogContent(tmp.Name()) - assert.NotEmpty(t, cvLog, cvLog) -} - -func TestGetEncryptedBlockPath(t *testing.T) { - cvOp := ` -2020-08-13 13:33:55.181541 D | exec: Running command: stdbuf -oL ceph-volume --log-path /var/log/ceph/set1-data-0-hfdc6 raw prepare --bluestore --data /dev/xvdce --crush-device-class hybriddu13 --dmcrypt --block.db /dev/xvdbb --block.wal /dev/xvdcu -2020-08-13 13:34:34.246638 I | cephosd: Running command: /usr/bin/ceph-authtool --gen-print-key -Running command: /usr/bin/ceph-authtool --gen-print-key -Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d -Running command: /usr/bin/ceph-authtool --gen-print-key -Running command: /usr/sbin/cryptsetup --batch-mode --key-file - luksFormat /dev/xvdce -Running command: /usr/sbin/cryptsetup --key-file - --allow-discards luksOpen /dev/xvdce ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdce-block-dmcrypt -Running command: /usr/sbin/cryptsetup --batch-mode --key-file - luksFormat /dev/xvdcu -Running command: /usr/sbin/cryptsetup --key-file - --allow-discards luksOpen /dev/xvdcu ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdcu-wal-dmcrypt -Running command: 
/usr/sbin/cryptsetup --batch-mode --key-file - luksFormat /dev/xvdbb -Running command: /usr/sbin/cryptsetup --key-file - --allow-discards luksOpen /dev/xvdbb ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdbb-db-dmcrypt -Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2 -Running command: /usr/bin/chown -R ceph:ceph /dev/mapper/ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdce-block-dmcrypt -Running command: /usr/bin/ln -s /dev/mapper/ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdce-block-dmcrypt /var/lib/ceph/osd/ceph-2/block -Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap` - - type args struct { - op string - blockType string - } - tests := []struct { - name string - args args - want string - }{ - {"not-found", args{"Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1", "block-dmcrypt"}, ""}, - {"found-block", args{cvOp, "block-dmcrypt"}, "/dev/mapper/ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdce-block-dmcrypt"}, - {"found-db", args{cvOp, "db-dmcrypt"}, "/dev/mapper/ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdbb-db-dmcrypt"}, - {"found-wal", args{cvOp, "wal-dmcrypt"}, "/dev/mapper/ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdcu-wal-dmcrypt"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := getEncryptedBlockPath(tt.args.op, tt.args.blockType); got != tt.want { - t.Errorf("getEncryptedBlockPath() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestIsNewStyledLvmBatch(t *testing.T) { - newStyleLvmBatchVersion := cephver.CephVersion{Major: 14, Minor: 2, Extra: 15} - legacyLvmBatchVersion := cephver.CephVersion{Major: 14, Minor: 2, Extra: 8} - assert.Equal(t, true, isNewStyledLvmBatch(newStyleLvmBatchVersion)) - assert.Equal(t, false, isNewStyledLvmBatch(legacyLvmBatchVersion)) -} - -func TestInitializeBlockWithMD(t *testing.T) { - // Common vars for all the tests - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "/dev/sdd"}}, - }, - } - - // Test default behavior - { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommand = func(command string, args ...string) error { - logger.Infof("%s %v", command, args) - - // Validate base common args - err := testBaseArgs(args) - if err != nil { - return err - } - - // Second command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" { - return nil - } - - return errors.Errorf("unknown command %s %s", command, args) - } - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - // First command - if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/sdd" && args[14] == "--report" { - return `[{"block_db": "/dev/sdd", "encryption": "None", "data": "/dev/sda", "data_size": "100.00 GB", "block_db_size": "100.00 GB"}]`, nil - } - - return "", errors.Errorf("unknown command %s %s", command, args) - } - a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 15}}, nodeName: "node1"} - context := &clusterd.Context{Executor: executor} - - err := a.initializeDevicesLVMMode(context, devices) - assert.NoError(t, err, "failed default behavior test") - } -} - -func TestUseRawMode(t *testing.T) { - type fields struct { - clusterInfo 
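
`TestGetEncryptedBlockPath` above feeds the captured `ceph-volume raw prepare --dmcrypt` output and expects the `/dev/mapper/ceph-<osd_fsid>-<dev>-<type>-dmcrypt` name for the requested block type. The following stand-alone sketch is merely one way to satisfy those table cases; it is not the package's `getEncryptedBlockPath` implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// findEncryptedBlockPath scans ceph-volume output for the dm-crypt mapper name
// created by `cryptsetup luksOpen` whose suffix matches the wanted block type
// ("block-dmcrypt", "db-dmcrypt" or "wal-dmcrypt").
func findEncryptedBlockPath(cvOutput, blockType string) string {
	for _, field := range strings.Fields(cvOutput) {
		if strings.HasPrefix(field, "ceph-") && strings.HasSuffix(field, blockType) {
			return "/dev/mapper/" + field
		}
	}
	return ""
}

func main() {
	out := "Running command: /usr/sbin/cryptsetup --key-file - --allow-discards luksOpen /dev/xvdce ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdce-block-dmcrypt"
	fmt.Println(findEncryptedBlockPath(out, "block-dmcrypt"))
	// -> /dev/mapper/ceph-e3c9ca4a-d00f-464b-9ac7-91fb151f6c8d-xvdce-block-dmcrypt
}
```
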
*cephclient.ClusterInfo - metadataDevice string - storeConfig config.StoreConfig - pvcBacked bool - } - type args struct { - context *clusterd.Context - pvcBacked bool - } - tests := []struct { - name string - fields fields - args args - want bool - wantErr bool - }{ - {"on pvc with lvm", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 5}}, "", config.StoreConfig{}, true}, args{&clusterd.Context{}, true}, false, false}, - {"on pvc with raw", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 8}}, "", config.StoreConfig{}, true}, args{&clusterd.Context{}, true}, true, false}, - {"non-pvc with lvm nautilus", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 13}}, "", config.StoreConfig{}, false}, args{&clusterd.Context{}, false}, false, false}, - {"non-pvc with lvm octopus", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 8}}, "", config.StoreConfig{}, false}, args{&clusterd.Context{}, false}, false, false}, - {"non-pvc with raw nautilus simple scenario supported", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 14}}, "", config.StoreConfig{}, false}, args{&clusterd.Context{}, false}, true, false}, - {"non-pvc with raw octopus simple scenario supported", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 9}}, "", config.StoreConfig{}, false}, args{&clusterd.Context{}, false}, true, false}, - {"non-pvc with raw pacific simple scenario supported", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 1}}, "", config.StoreConfig{}, false}, args{&clusterd.Context{}, false}, true, false}, - {"non-pvc with lvm nautilus complex scenario not supported: encrypted", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 14}}, "", config.StoreConfig{EncryptedDevice: true}, false}, args{&clusterd.Context{}, false}, false, false}, - {"non-pvc with lvm octopus complex scenario not supported: encrypted", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 9}}, "", config.StoreConfig{EncryptedDevice: true}, false}, args{&clusterd.Context{}, false}, false, false}, - {"non-pvc with lvm nautilus complex scenario not supported: osd per device > 1", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 14}}, "", config.StoreConfig{OSDsPerDevice: 2}, false}, args{&clusterd.Context{}, false}, false, false}, - {"non-pvc with lvm octopus complex scenario not supported: osd per device > 1", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 9}}, "", config.StoreConfig{OSDsPerDevice: 2}, false}, args{&clusterd.Context{}, false}, false, false}, - {"non-pvc with lvm nautilus complex scenario not supported: metadata dev", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 14}}, "/dev/sdb", config.StoreConfig{}, false}, args{&clusterd.Context{}, false}, false, false}, - {"non-pvc with lvm octopus complex scenario not supported: metadata dev", fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 9}}, "/dev/sdb", config.StoreConfig{}, false}, args{&clusterd.Context{}, false}, false, false}, - {"non-pvc with lvm pacific complex scenario not supported: metadata dev", 
fields{&cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 1}}, "/dev/sdb", config.StoreConfig{}, false}, args{&clusterd.Context{}, false}, false, false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - a := &OsdAgent{ - clusterInfo: tt.fields.clusterInfo, - metadataDevice: tt.fields.metadataDevice, - storeConfig: tt.fields.storeConfig, - pvcBacked: tt.fields.pvcBacked, - } - got, err := a.useRawMode(tt.args.context, tt.args.pvcBacked) - if (err != nil) != tt.wantErr { - t.Errorf("OsdAgent.useRawMode() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("OsdAgent.useRawMode() = %v, want %v", got, tt.want) - } - }) - } -} - -func contains(arr []string, str string) bool { - for _, a := range arr { - if a == str { - return true - } - } - return false -} - -func TestAppendOSDInfo(t *testing.T) { - // Set 1: duplicate entries - { - currentOSDs := []oposd.OSDInfo{ - {ID: 0, Cluster: "ceph", UUID: "275950b5-dcb3-4c3e-b0df-014b16755dc5", DevicePartUUID: "", BlockPath: "/dev/ceph-48b22180-8358-4ab4-aec0-3fb83a20328b/osd-block-275950b5-dcb3-4c3e-b0df-014b16755dc5", MetadataPath: "", WalPath: "", SkipLVRelease: false, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "lvm", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 1, Cluster: "ceph", UUID: "3206c1c0-7ea2-412b-bd42-708cfe5e4acb", DevicePartUUID: "", BlockPath: "/dev/ceph-140a1344-636d-4442-85b3-bb3cd18ca002/osd-block-3206c1c0-7ea2-412b-bd42-708cfe5e4acb", MetadataPath: "", WalPath: "", SkipLVRelease: false, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "lvm", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 2, Cluster: "ceph", UUID: "7ea5e98b-755c-4837-a2a3-9ad61e67cf6f", DevicePartUUID: "", BlockPath: "/dev/ceph-0c466524-57a3-4e5f-b4e3-04538ff0aced/osd-block-7ea5e98b-755c-4837-a2a3-9ad61e67cf6f", MetadataPath: "", WalPath: "", SkipLVRelease: false, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "lvm", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - } - newOSDs := []oposd.OSDInfo{ - {ID: 2, Cluster: "ceph", UUID: "7ea5e98b-755c-4837-a2a3-9ad61e67cf6f", DevicePartUUID: "", BlockPath: "/dev/mapper/ceph--0c466524--57a3--4e5f--b4e3--04538ff0aced-osd--block--7ea5e98b--755c--4837--a2a3--9ad61e67cf6f", MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 0, Cluster: "ceph", UUID: "275950b5-dcb3-4c3e-b0df-014b16755dc5", DevicePartUUID: "", BlockPath: "/dev/mapper/ceph--48b22180--8358--4ab4--aec0--3fb83a20328b-osd--block--275950b5--dcb3--4c3e--b0df--014b16755dc5", MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 1, Cluster: "ceph", UUID: "3206c1c0-7ea2-412b-bd42-708cfe5e4acb", DevicePartUUID: "", BlockPath: "/dev/mapper/ceph--140a1344--636d--4442--85b3--bb3cd18ca002-osd--block--3206c1c0--7ea2--412b--bd42--708cfe5e4acb", MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - } - 
trimmedOSDs := appendOSDInfo(currentOSDs, newOSDs) - assert.Equal(t, 3, len(trimmedOSDs)) - assert.NotContains(t, trimmedOSDs[0].BlockPath, "mapper") - } - - // Set 2: no duplicate entries, just a mix of RAW and LVM OSDs should not trim anything - { - currentOSDs := []oposd.OSDInfo{ - {ID: 0, Cluster: "ceph", UUID: "275950b5-dcb3-4c3e-b0df-014b16755dc5", DevicePartUUID: "", BlockPath: "/dev/ceph-48b22180-8358-4ab4-aec0-3fb83a20328b/osd-block-275950b5-dcb3-4c3e-b0df-014b16755dc5", MetadataPath: "", WalPath: "", SkipLVRelease: false, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "lvm", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 1, Cluster: "ceph", UUID: "3206c1c0-7ea2-412b-bd42-708cfe5e4acb", DevicePartUUID: "", BlockPath: "/dev/ceph-140a1344-636d-4442-85b3-bb3cd18ca002/osd-block-3206c1c0-7ea2-412b-bd42-708cfe5e4acb", MetadataPath: "", WalPath: "", SkipLVRelease: false, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "lvm", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 2, Cluster: "ceph", UUID: "7ea5e98b-755c-4837-a2a3-9ad61e67cf6f", DevicePartUUID: "", BlockPath: "/dev/ceph-0c466524-57a3-4e5f-b4e3-04538ff0aced/osd-block-7ea5e98b-755c-4837-a2a3-9ad61e67cf6f", MetadataPath: "", WalPath: "", SkipLVRelease: false, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "lvm", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - } - newOSDs := []oposd.OSDInfo{ - {ID: 3, Cluster: "ceph", UUID: "35e61dbc-4455-45fd-b5c8-39be2a29db02", DevicePartUUID: "", BlockPath: "/dev/sdb", MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 4, Cluster: "ceph", UUID: "f5c0ce2c-76ee-4cbf-94df-9e480da6c614", DevicePartUUID: "", BlockPath: "/dev/sdd", MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 5, Cluster: "ceph", UUID: "4aadb152-2b30-477a-963e-44447ded6a66", DevicePartUUID: "", BlockPath: "/dev/sde", MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - } - trimmedOSDs := appendOSDInfo(currentOSDs, newOSDs) - assert.Equal(t, 6, len(trimmedOSDs)) - } - // Set 3: no current OSDs (no LVM, just RAW) - { - currentOSDs := []oposd.OSDInfo{} - newOSDs := []oposd.OSDInfo{ - {ID: 3, Cluster: "ceph", UUID: "35e61dbc-4455-45fd-b5c8-39be2a29db02", DevicePartUUID: "", BlockPath: "/dev/sdb", MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 4, Cluster: "ceph", UUID: "f5c0ce2c-76ee-4cbf-94df-9e480da6c614", DevicePartUUID: "", BlockPath: "/dev/sdd", MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - {ID: 5, Cluster: "ceph", UUID: "4aadb152-2b30-477a-963e-44447ded6a66", DevicePartUUID: "", BlockPath: "/dev/sde", 
MetadataPath: "", WalPath: "", SkipLVRelease: true, Location: "root=default host=minikube rack=rack1 zone=b", LVBackedPV: false, CVMode: "raw", Store: "bluestore", TopologyAffinity: "topology.rook.io/rack=rack1"}, - } - trimmedOSDs := appendOSDInfo(currentOSDs, newOSDs) - assert.Equal(t, 3, len(trimmedOSDs)) - } -} diff --git a/pkg/daemon/ceph/util/util.go b/pkg/daemon/ceph/util/util.go deleted file mode 100644 index bfa87a665..000000000 --- a/pkg/daemon/ceph/util/util.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "io/ioutil" - "net" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" -) - -const ( - RBDSysBusPathDefault = "/sys/bus/rbd" - RBDDevicesDir = "devices" - RBDDevicePathPrefix = "/dev/rbd" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-ceph-util") - -// FindRBDMappedFile search for the mapped RBD volume and returns its device path -func FindRBDMappedFile(imageName, poolName, sysBusDir string) (string, error) { - - sysBusDeviceDir := filepath.Join(sysBusDir, RBDDevicesDir) - // if sysPath does not exist, no attachments has happened - if _, err := os.Stat(sysBusDeviceDir); os.IsNotExist(err) { - return "", nil - } - - files, err := ioutil.ReadDir(sysBusDeviceDir) - if err != nil { - return "", errors.Wrap(err, "failed to read rbd device dir") - } - - for _, idFile := range files { - nameContent, err := ioutil.ReadFile(filepath.Clean(filepath.Join(sysBusDeviceDir, idFile.Name(), "name"))) - if err == nil && imageName == strings.TrimSpace(string(nameContent)) { - // the image for the current rbd device matches, now try to match pool - poolContent, err := ioutil.ReadFile(filepath.Clean(filepath.Join(sysBusDeviceDir, idFile.Name(), "pool"))) - if err == nil && poolName == strings.TrimSpace(string(poolContent)) { - // match current device matches both image name and pool name, return the device - return idFile.Name(), nil - } - } - } - return "", nil -} - -// GetIPFromEndpoint return the IP from an endpoint string (192.168.0.1:6789) -func GetIPFromEndpoint(endpoint string) string { - host, _, err := net.SplitHostPort(endpoint) - if err != nil { - logger.Errorf("failed to split ip and port for endpoint %q. %v", endpoint, err) - } - return host -} - -// GetPortFromEndpoint return the port from an endpoint string (192.168.0.1:6789) -func GetPortFromEndpoint(endpoint string) int32 { - var port int - _, portString, err := net.SplitHostPort(endpoint) - if err != nil { - logger.Errorf("failed to split host and port for endpoint %q, assuming default Ceph port %q. %v", endpoint, portString, err) - } else { - // #nosec G109 using Atoi to convert type into int is not a real risk - port, err = strconv.Atoi(portString) - if err != nil { - logger.Errorf("failed to convert %q to integer. 
%v", portString, err) - } - } - return int32(port) -} diff --git a/pkg/daemon/ceph/util/util_test.go b/pkg/daemon/ceph/util/util_test.go deleted file mode 100644 index b56bc3c40..000000000 --- a/pkg/daemon/ceph/util/util_test.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFindDevicePath(t *testing.T) { - // set up a mock RBD sys bus file system - mockRBDSysBusPath, err := ioutil.TempDir("", "TestFindDevicePath") - if err != nil { - t.Fatalf("failed to create temp rbd sys bus dir: %+v", err) - } - defer os.RemoveAll(mockRBDSysBusPath) - dev0Path := filepath.Join(mockRBDSysBusPath, "devices", "3") - err = os.MkdirAll(dev0Path, 0777) - assert.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(dev0Path, "name"), []byte("myimage1"), 0600) - assert.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(dev0Path, "pool"), []byte("mypool1"), 0600) - assert.NoError(t, err) - mappedImageFile, _ := FindRBDMappedFile("myimage1", "mypool1", mockRBDSysBusPath) - assert.Equal(t, "3", mappedImageFile) -} diff --git a/pkg/daemon/discover/discover.go b/pkg/daemon/discover/discover.go deleted file mode 100644 index e735fa824..000000000 --- a/pkg/daemon/discover/discover.go +++ /dev/null @@ -1,506 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package discover to discover unused devices. 
-package discover - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "os" - "os/exec" - "os/signal" - "path" - "regexp" - "strings" - "syscall" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/sys" - - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - discoverDaemonUdev = "DISCOVER_DAEMON_UDEV_BLACKLIST" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "rook-discover") - // AppName is the name of the pod - AppName = "rook-discover" - // NodeAttr is the attribute of that node - NodeAttr = "rook.io/node" - // LocalDiskCMData is the data name of the config map storing devices - LocalDiskCMData = "devices" - // LocalDiskCMName is name of the config map storing devices - LocalDiskCMName = "local-device-%s" - nodeName string - namespace string - lastDevice string - cmName string - cm *v1.ConfigMap - udevEventPeriod = time.Duration(5) * time.Second - useCVInventory bool -) - -// CephVolumeInventory is the Go struct representation of the json output -type CephVolumeInventory struct { - Path string `json:"path"` - Available bool `json:"available"` - RejectedReasons json.RawMessage `json:"rejected_reasons"` - SysAPI json.RawMessage `json:"sys_api"` - LVS json.RawMessage `json:"lvs"` -} - -// Run is the entry point of that package execution -func Run(context *clusterd.Context, probeInterval time.Duration, useCV bool) error { - if context == nil { - return fmt.Errorf("nil context") - } - logger.Debugf("device discovery interval is %q", probeInterval.String()) - logger.Debugf("use ceph-volume inventory is %t", useCV) - nodeName = os.Getenv(k8sutil.NodeNameEnvVar) - namespace = os.Getenv(k8sutil.PodNamespaceEnvVar) - cmName = k8sutil.TruncateNodeName(LocalDiskCMName, nodeName) - useCVInventory = useCV - sigc := make(chan os.Signal, 1) - signal.Notify(sigc, syscall.SIGTERM) - - err := updateDeviceCM(context) - if err != nil { - logger.Infof("failed to update device configmap: %v", err) - return err - } - - udevEvents := make(chan string) - go udevBlockMonitor(udevEvents, udevEventPeriod) - for { - select { - case <-sigc: - logger.Infof("shutdown signal received, exiting...") - return nil - case <-time.After(probeInterval): - if err := updateDeviceCM(context); err != nil { - logger.Errorf("failed to update device configmap during probe interval. %v", err) - } - case _, ok := <-udevEvents: - if ok { - logger.Info("trigger probe from udev event") - if err := updateDeviceCM(context); err != nil { - logger.Errorf("failed to update device configmap triggered from udev event. 
%v", err) - } - } else { - logger.Warningf("disabling udev monitoring") - udevEvents = nil - } - } - } -} - -func matchUdevEvent(text string, matches, exclusions []string) (bool, error) { - for _, match := range matches { - matched, err := regexp.MatchString(match, text) - if err != nil { - return false, fmt.Errorf("failed to search string: %v", err) - } - if matched { - hasExclusion := false - for _, exclusion := range exclusions { - matched, err = regexp.MatchString(exclusion, text) - if err != nil { - return false, fmt.Errorf("failed to search string: %v", err) - } - if matched { - hasExclusion = true - break - } - } - if !hasExclusion { - logger.Infof("udevadm monitor: matched event: %s", text) - return true, nil - } - } - } - return false, nil -} - -// Scans `udevadm monitor` output for block sub-system events. Each line of -// output matching a set of substrings is sent to the provided channel. An event -// is returned if it passes any matches tests, and passes all exclusion tests. -func rawUdevBlockMonitor(c chan string, matches, exclusions []string) { - defer close(c) - - // stdbuf -oL performs line buffered output - cmd := exec.Command("stdbuf", "-oL", "udevadm", "monitor", "-u", "-k", "-s", "block") - stdout, err := cmd.StdoutPipe() - if err != nil { - logger.Warningf("Cannot open udevadm stdout: %v", err) - return - } - - err = cmd.Start() - if err != nil { - logger.Warningf("Cannot start udevadm monitoring: %v", err) - return - } - - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { - text := scanner.Text() - logger.Debugf("udevadm monitor: %s", text) - match, err := matchUdevEvent(text, matches, exclusions) - if err != nil { - logger.Warningf("udevadm filtering failed: %v", err) - return - } - if match { - c <- text - } - } - - if err := scanner.Err(); err != nil { - logger.Warningf("udevadm monitor scanner error: %v", err) - } - - logger.Info("udevadm monitor finished") -} - -// Monitors udev for block device changes, and collapses these events such that -// only one event is emitted per period in order to deal with flapping. -func udevBlockMonitor(c chan string, period time.Duration) { - defer close(c) - var udevFilter []string - - // return any add or remove events, but none that match device mapper - // events. string matching is case-insensitive - events := make(chan string) - - // get discoverDaemonUdevBlacklist from the environment variable - // if user doesn't provide any regex; generate the default regex - // else use the regex provided by user - discoverUdev := os.Getenv(discoverDaemonUdev) - if discoverUdev == "" { - discoverUdev = "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" - } - udevFilter = strings.Split(discoverUdev, ",") - logger.Infof("using the regular expressions %q", udevFilter) - - go rawUdevBlockMonitor(events, - []string{"(?i)add", "(?i)remove"}, - udevFilter) - - for { - event, ok := <-events - if !ok { - return - } - timeout := time.NewTimer(period) - for { - select { - case <-timeout.C: - break - case _, ok := <-events: - if !ok { - return - } - continue - } - break - } - c <- event - } -} - -func ignoreDevice(dev sys.LocalDisk) bool { - return strings.Contains(strings.ToUpper(dev.DevLinks), "USB") -} - -func checkMatchingDevice(checkDev sys.LocalDisk, devices []sys.LocalDisk) *sys.LocalDisk { - for i, dev := range devices { - if ignoreDevice(dev) { - continue - } - // check if devices should be considered the same. the uuid can be - // unstable, so we also use the reported serial and device name, which - // appear to be more stable. 
- if checkDev.UUID == dev.UUID { - return &devices[i] - } - - // on virt-io devices in libvirt, the serial is reported as an empty - // string, so also account for that. - if checkDev.Serial == dev.Serial && checkDev.Serial != "" { - return &devices[i] - } - - if checkDev.Name == dev.Name { - return &devices[i] - } - } - return nil -} - -// note that the idea of equality here may not be intuitive. equality of device -// sets refers to a state in which no change has been observed between the sets -// of devices that would warrant changes to their consumption by storage -// daemons. for example, if a device appears to have been wiped vs a device -// appears to now be in use. -func checkDeviceListsEqual(oldDevs, newDevs []sys.LocalDisk) bool { - for _, oldDev := range oldDevs { - if ignoreDevice(oldDev) { - continue - } - match := checkMatchingDevice(oldDev, newDevs) - if match == nil { - // device has been removed - return false - } - if !oldDev.Empty && match.Empty { - // device has changed from non-empty to empty - return false - } - if oldDev.Partitions != nil && match.Partitions == nil { - return false - } - if string(oldDev.CephVolumeData) == "" && string(match.CephVolumeData) != "" { - // return ceph volume inventory data was not enabled before - return false - } - } - - for _, newDev := range newDevs { - if ignoreDevice(newDev) { - continue - } - match := checkMatchingDevice(newDev, oldDevs) - if match == nil { - // device has been added - return false - } - // the matching case is handled in the previous join - } - - return true -} - -// DeviceListsEqual checks whether 2 lists are equal or not -func DeviceListsEqual(old, new string) (bool, error) { - var oldDevs []sys.LocalDisk - var newDevs []sys.LocalDisk - - err := json.Unmarshal([]byte(old), &oldDevs) - if err != nil { - return false, fmt.Errorf("cannot unmarshal devices: %+v", err) - } - - err = json.Unmarshal([]byte(new), &newDevs) - if err != nil { - return false, fmt.Errorf("cannot unmarshal devices: %+v", err) - } - - return checkDeviceListsEqual(oldDevs, newDevs), nil -} - -func updateDeviceCM(clusterdContext *clusterd.Context) error { - ctx := context.TODO() - logger.Infof("updating device configmap") - devices, err := probeDevices(clusterdContext) - if err != nil { - logger.Infof("failed to probe devices: %v", err) - return err - } - deviceJSON, err := json.Marshal(devices) - if err != nil { - logger.Infof("failed to marshal: %v", err) - return err - } - - deviceStr := string(deviceJSON) - if cm == nil { - cm, err = clusterdContext.Clientset.CoreV1().ConfigMaps(namespace).Get(ctx, cmName, metav1.GetOptions{}) - } - if err == nil { - lastDevice = cm.Data[LocalDiskCMData] - logger.Debugf("last devices %s", lastDevice) - } else { - if !kerrors.IsNotFound(err) { - logger.Infof("failed to get configmap: %v", err) - return err - } - - data := make(map[string]string, 1) - data[LocalDiskCMData] = deviceStr - - // the map doesn't exist yet, create it now - cm = &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cmName, - Namespace: namespace, - Labels: map[string]string{ - k8sutil.AppAttr: AppName, - NodeAttr: nodeName, - }, - }, - Data: data, - } - - // Get the discover daemon pod details to attach the owner reference to the config map - discoverPod, err := k8sutil.GetRunningPod(clusterdContext.Clientset) - if err != nil { - logger.Warningf("failed to get discover pod to set ownerref. 
%+v", err) - } else { - k8sutil.SetOwnerRefsWithoutBlockOwner(&cm.ObjectMeta, discoverPod.OwnerReferences) - } - - cm, err = clusterdContext.Clientset.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) - if err != nil { - logger.Infof("failed to create configmap: %v", err) - return fmt.Errorf("failed to create local device map %s: %+v", cmName, err) - } - lastDevice = deviceStr - } - devicesEqual, err := DeviceListsEqual(lastDevice, deviceStr) - if err != nil { - return fmt.Errorf("failed to compare device lists: %v", err) - } - if !devicesEqual { - data := make(map[string]string, 1) - data[LocalDiskCMData] = deviceStr - cm.Data = data - cm, err = clusterdContext.Clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) - if err != nil { - logger.Infof("failed to update configmap %s: %v", cmName, err) - return err - } - } - return nil -} - -func logDevices(devices []*sys.LocalDisk) { - var devicesList []string - for _, device := range devices { - logger.Debugf("localdevice %q: %+v", device.Name, device) - devicesList = append(devicesList, device.Name) - } - logger.Infof("localdevices: %q", strings.Join(devicesList, ", ")) -} - -func probeDevices(context *clusterd.Context) ([]sys.LocalDisk, error) { - devices := make([]sys.LocalDisk, 0) - localDevices, err := clusterd.DiscoverDevices(context.Executor) - if err != nil { - return devices, fmt.Errorf("failed initial hardware discovery. %+v", err) - } - - logDevices(localDevices) - - // ceph-volume inventory command takes a little time to complete. - // Get this data only if it is needed and once by function execution - var cvInventory *map[string]string = nil - if useCVInventory { - logger.Infof("Getting ceph-volume inventory information") - cvInventory, err = getCephVolumeInventory(context) - if err != nil { - logger.Errorf("error getting ceph-volume inventory: %v", err) - } - } - - for _, device := range localDevices { - if device == nil { - continue - } - if device.Type == sys.PartType { - continue - } - - partitions, _, err := sys.GetDevicePartitions(device.Name, context.Executor) - if err != nil { - logger.Infof("failed to check device partitions %s: %v", device.Name, err) - continue - } - - // check if there is a file system on the device - fs, err := sys.GetDeviceFilesystems(device.Name, context.Executor) - if err != nil { - logger.Infof("failed to check device filesystem %s: %v", device.Name, err) - continue - } - device.Partitions = partitions - device.Filesystem = fs - device.Empty = clusterd.GetDeviceEmpty(device) - - // Add the information provided by ceph-volume inventory - if cvInventory != nil { - CVData, deviceExists := (*cvInventory)[path.Join("/dev/", device.Name)] - if deviceExists { - device.CephVolumeData = CVData - } else { - logger.Errorf("ceph-volume information for device %q not found", device.Name) - } - } else { - device.CephVolumeData = "" - } - - devices = append(devices, *device) - } - - logger.Infof("available devices: %+v", devices) - return devices, nil -} - -// getCephVolumeInventory: Return a map of strings indexed by device with the -// information about the device returned by the command -func getCephVolumeInventory(context *clusterd.Context) (*map[string]string, error) { - inventory, err := context.Executor.ExecuteCommandWithOutput("ceph-volume", "inventory", "--format", "json") - if err != nil { - return nil, fmt.Errorf("failed to execute ceph-volume inventory. 
%+v", err) - } - - // Return a map with the information of each device indexed by path - CVDevices := make(map[string]string) - - // No data retrieved from ceph-volume - if inventory == "" { - return &CVDevices, nil - } - - // Get a slice to store the json data - bInventory := []byte(inventory) - var CVInventory []CephVolumeInventory - err = json.Unmarshal(bInventory, &CVInventory) - if err != nil { - return &CVDevices, fmt.Errorf("error unmarshalling json data coming from ceph-volume inventory. %v", err) - } - - for _, device := range CVInventory { - jsonData, err := json.Marshal(device) - if err != nil { - logger.Errorf("error marshaling json data for device: %v", device.Path) - } else { - CVDevices[device.Path] = string(jsonData) - } - } - - return &CVDevices, nil -} diff --git a/pkg/daemon/discover/discover_test.go b/pkg/daemon/discover/discover_test.go deleted file mode 100644 index 97c21aa50..000000000 --- a/pkg/daemon/discover/discover_test.go +++ /dev/null @@ -1,405 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package discover to discover unused devices. -package discover - -import ( - "fmt" - "testing" - - "github.com/rook/rook/pkg/clusterd" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/rook/rook/pkg/util/sys" - - "github.com/stretchr/testify/assert" -) - -const ( - udevOutput = `DEVLINKS=/dev/disk/by-id/scsi-36001405d27e5d898829468b90ce4ef8c /dev/disk/by-id/wwn-0x6001405d27e5d898829468b90ce4ef8c /dev/disk/by-path/ip-127.0.0.1:3260-iscsi-iqn.2016-06.world.srv:storage.target01-lun-0 /dev/disk/by-uuid/f2d38cba-37da-411d-b7ba-9a6696c58174 -DEVNAME=/dev/sdk -DEVPATH=/devices/platform/host6/session2/target6:0:0/6:0:0:0/block/sdk -DEVTYPE=disk -ID_BUS=scsi -ID_FS_TYPE=ext2 -ID_FS_USAGE=filesystem -ID_FS_UUID=f2d38cba-37da-411d-b7ba-9a6696c58174 -ID_FS_UUID_ENC=f2d38cba-37da-411d-b7ba-9a6696c58174 -ID_FS_VERSION=1.0 -ID_MODEL=disk01 -ID_MODEL_ENC=disk01\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20 -ID_PATH=ip-127.0.0.1:3260-iscsi-iqn.2016-06.world.srv:storage.target01-lun-0 -ID_PATH_TAG=ip-127_0_0_1_3260-iscsi-iqn_2016-06_world_srv_storage_target01-lun-0 -ID_REVISION=4.0 -ID_SCSI=1 -ID_SCSI_SERIAL=d27e5d89-8829-468b-90ce-4ef8c02f07fe -ID_SERIAL=36001405d27e5d898829468b90ce4ef8c -ID_SERIAL_SHORT=6001405d27e5d898829468b90ce4ef8c -ID_TARGET_PORT=0 -ID_TYPE=disk -ID_VENDOR=LIO-ORG -ID_VENDOR_ENC=LIO-ORG\x20 -ID_WWN=0x6001405d27e5d898 -ID_WWN_VENDOR_EXTENSION=0x829468b90ce4ef8c -ID_WWN_WITH_EXTENSION=0x6001405d27e5d898829468b90ce4ef8c -MAJOR=8 -MINOR=160 -SUBSYSTEM=block -TAGS=:systemd: -USEC_INITIALIZED=15981915740802 -` - sgdiskOutput = `Disk /dev/sdb: 20971520 sectors, 10.0 GiB -Logical sector size: 512 bytes -Disk identifier (GUID): 819C2F95-7015-438F-A624-D40DBA2C2069 -Partition table holds up to 128 entries -` -) - -func TestProbeDevices(t *testing.T) { - // set up mock execute so we can verify the partitioning happens on sda - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = 
func(command string, args ...string) (string, error) { - logger.Infof("RUN Command %s %v", command, args) - output := "" - if args[0] == "--all" { - output = "testa" - } else if args[0] == "/dev/testa" { - output = `SIZE="249510756352" ROTA="1" RO="0" TYPE="disk" PKNAME=""` - } else if args[0] == "info" && args[1] == "--query=property" { - output = udevOutput - } else if args[0] == "--print" && args[1] == "/dev/testa" { - output = sgdiskOutput - } - return output, nil - } - - context := &clusterd.Context{Executor: executor} - - devices, err := probeDevices(context) - assert.Nil(t, err) - assert.Equal(t, 1, len(devices)) - assert.Equal(t, "ext2", devices[0].Filesystem) -} - -func TestMatchUdevMonitorFiltering(t *testing.T) { - // f <- matching function as configured - f := func(text string) bool { - take, err := matchUdevEvent(text, []string{"(?i)add", "(?i)remove"}, []string{"(?i)dm-[0-9]+"}) - assert.NoError(t, err) - return take - } - - // add events are emitted - take := f("KERNEL[1008.734088] add /devices/pci0000:00/0000:00:07.0/virtio5/block/vdc (block)") - assert.True(t, take) - - // remove events are emitted - take = f("KERNEL[1104.287884] remove /devices/pci0000:00/0000:00:07.0/virtio5/block/vdc (block)") - assert.True(t, take) - - // change events are ignored - take = f("KERNEL[1136.069071] change /devices/pci0000:00/0000:00:02.0/virtio0/block/vda/vda1 (block)") - assert.False(t, take) - - // add events that match device mapper events are ignored - take = f("KERNEL[1042.464238] add /devices/virtual/block/dm-1 (block)") - assert.False(t, take) -} - -func TestDeviceListsEqual(t *testing.T) { - // empty lists are equal - assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{}, - []sys.LocalDisk{}, - )) - - // default constructed LocalDisks are equal - assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{ - {}, - }, - []sys.LocalDisk{ - {}, - }, - )) - - // a disk is removed - assert.False(t, checkDeviceListsEqual( - []sys.LocalDisk{ - {}, - }, - []sys.LocalDisk{}, - )) - - // a disk is added - assert.False(t, checkDeviceListsEqual( - []sys.LocalDisk{}, - []sys.LocalDisk{ - {}, - }, - )) - - // devices with usb keyword are ignored. the lists should be equal - assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - DevLinks: "xyzusbabc", - }, - }, - []sys.LocalDisk{}, - )) - - // devices with usb keyword are ignored. 
the lists should be equal - assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{}, - []sys.LocalDisk{ - { - DevLinks: "xyzusbabc", - }, - }, - )) - - // equal if uuid is equal - assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "u2", - Serial: "xxx", - Name: "xxx", - }, - }, - []sys.LocalDisk{ - { - UUID: "u2", - Serial: "s2", - Name: "n2", - }, - }, - )) - - // equal if serial is equal - assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "xxx", - Serial: "s2", - Name: "xxx", - }, - }, - []sys.LocalDisk{ - { - UUID: "u2", - Serial: "s2", - Name: "n2", - }, - }, - )) - - // equal if device name is equal - assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "xxx", - Serial: "xxx", - Name: "n2", - }, - }, - []sys.LocalDisk{ - { - UUID: "u2", - Serial: "s2", - Name: "n2", - }, - }, - )) - - // otherwise, not equal - assert.False(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "xxx", - Serial: "xxx", - Name: "xxx", - }, - }, - []sys.LocalDisk{ - { - UUID: "u2", - Serial: "s2", - Name: "n2", - }, - }, - )) - - // device equality ignores an empty serial - assert.False(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "xxx", - Serial: "", - Name: "xxx", - }, - }, - []sys.LocalDisk{ - { - UUID: "u2", - Serial: "", - Name: "n2", - }, - }, - )) - - // devices are the same, but transition from non-empty to empty. in this - // case we consider the lists to be non-equal (i.e. of interest to storage - // providers). - assert.False(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "uuid", - Empty: false, - }, - }, - []sys.LocalDisk{ - { - UUID: "uuid", - Empty: true, - }, - }, - )) - - // devices are the same, but transition from empty to non-empty (e.g. the - // dev is now in use). in this case we consider the lists to be equal (i.e. - // no interesting change). - assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "uuid", - Empty: true, - }, - }, - []sys.LocalDisk{ - { - UUID: "uuid", - Empty: false, - }, - }, - )) - - // devices are the same, but the partition table is cleared. this would be - // of interest to storage providers! - assert.False(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "uuid", - Partitions: []sys.Partition{ - {}, - }, - }, - }, - []sys.LocalDisk{ - { - UUID: "uuid", - Partitions: nil, - }, - }, - )) - - // devices are the same, but the partition table has been created. not so - // interesting. 
- assert.True(t, checkDeviceListsEqual( - []sys.LocalDisk{ - { - UUID: "uuid", - Partitions: nil, - }, - }, - []sys.LocalDisk{ - { - UUID: "uuid", - Partitions: []sys.Partition{ - {}, - }, - }, - }, - )) -} - -func TestGetCephVolumeInventory(t *testing.T) { - run := 0 - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, arg ...string) (string, error) { - run++ - logger.Infof("run %d command %s", run, command) - switch { - case run == 1: - return `[{"available": true, "rejected_reasons": [], "sys_api": {"scheduler_mode": "noop", -"rotational": "0", "vendor": "ATA", "human_readable_size": "25.00 GB", "sectors": 0, "sas_device_handle": "", -"partitions": {}, "rev": "1.0", "sas_address": "", "locked": 0, "sectorsize": "512", "removable": "0", "path": "/dev/sdb", -"support_discard": "", "model": "VBOX SMART HARDDISK", "ro": "0", "nr_requests": "128", "size": 26843545600.0}, -"lvs": [], "path": "/dev/sdb"}, {"available": false, "rejected_reasons": ["locked"], "sys_api": {"scheduler_mode": "noop", - "rotational": "1", "vendor": "ATA", "human_readable_size": "32.00 GB", "sectors": 0, "sas_device_handle": "", - "partitions": {"sda2": {"start": "2099200", "holders": ["dm-0", "dm-1"], "sectorsize": 512, "sectors": "65009664", - "size": "31.00 GB"}, "sda1": {"start": "2048", "holders": [], "sectorsize": 512, "sectors": "2097152", "size": "1024.00 MB"}}, - "rev": "1.0", "sas_address": "", "locked": 1, "sectorsize": "512", "removable": "0", "path": "/dev/sda", "support_discard": "", - "model": "VBOX HARDDISK", "ro": "0", "nr_requests": "128", "size": 34359738368.0}, "lvs": [{"comment": "not used by ceph", "name": "swap"}, - {"comment": "not used by ceph", "name": "root"}], "path": "/dev/sda"}] - `, nil - case run == 2: // No data returned from Ceph Volume - return ``, nil - case run == 3: // No devices returned from Ceph Volume - return `[]`, nil - case run == 4: // Error executing Ceph Volume - return ``, fmt.Errorf("unexplainable error") - case run == 5: // A device without sys_api data - return `[{"available": true }]`, nil - } - return "", nil - }, - } - - context := &clusterd.Context{Executor: executor} - - dev_sda := `{"path":"/dev/sda","available":false,"rejected_reasons":["locked"],"sys_api":{"scheduler_mode":"noop","rotational":"1","vendor":"ATA","human_readable_size":"32.00 GB","sectors":0,"sas_device_handle":"","partitions":{"sda2":{"start":"2099200","holders":["dm-0","dm-1"],"sectorsize":512,"sectors":"65009664","size":"31.00 GB"},"sda1":{"start":"2048","holders":[],"sectorsize":512,"sectors":"2097152","size":"1024.00 MB"}},"rev":"1.0","sas_address":"","locked":1,"sectorsize":"512","removable":"0","path":"/dev/sda","support_discard":"","model":"VBOX HARDDISK","ro":"0","nr_requests":"128","size":34359738368.0},"lvs":[{"comment":"not used by ceph","name":"swap"},{"comment":"not used by ceph","name":"root"}]}` - dev_sdb := `{"path":"/dev/sdb","available":true,"rejected_reasons":[],"sys_api":{"scheduler_mode":"noop","rotational":"0","vendor":"ATA","human_readable_size":"25.00 GB","sectors":0,"sas_device_handle":"","partitions":{},"rev":"1.0","sas_address":"","locked":0,"sectorsize":"512","removable":"0","path":"/dev/sdb","support_discard":"","model":"VBOX SMART HARDDISK","ro":"0","nr_requests":"128","size":26843545600.0},"lvs":[]}` - - // Normal execution - cvdata, err := getCephVolumeInventory(context) - - assert.Nil(t, err) - assert.Equal(t, len(*cvdata), 2) - assert.Equal(t, (*cvdata)["/dev/sda"], dev_sda) - assert.Equal(t, (*cvdata)["/dev/sdb"], dev_sdb) 
- - // No data returned from Ceph Volume - cvdata, err = getCephVolumeInventory(context) - assert.Nil(t, err) - assert.Equal(t, len(*cvdata), 0) - - // No devices returned from Ceph Volume - cvdata, err = getCephVolumeInventory(context) - assert.Nil(t, err) - assert.Equal(t, len(*cvdata), 0) - - // Error executing Ceph Volume - cvdata, err = getCephVolumeInventory(context) - assert.Error(t, err, "unexplainable error") - assert.Nil(t, cvdata, 0) - - // // A device without sys_api data - cvdata, err = getCephVolumeInventory(context) - assert.Nil(t, err) - assert.Equal(t, len(*cvdata), 1) -} diff --git a/pkg/daemon/util/cmdreporter.go b/pkg/daemon/util/cmdreporter.go index 0ce0d50c7..339faebdd 100644 --- a/pkg/daemon/util/cmdreporter.go +++ b/pkg/daemon/util/cmdreporter.go @@ -28,7 +28,7 @@ import ( "syscall" "github.com/coreos/pkg/capnslog" - "github.com/rook/rook/pkg/operator/k8sutil" + "github.com/rook/cassandra/pkg/operator/k8sutil" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,7 +50,7 @@ const ( ) var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "job-reporter-cmd") + logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "job-reporter-cmd") ) // CmdReporter is a process intended to be run in simple Kubernetes jobs. The CmdReporter runs a diff --git a/pkg/daemon/util/cmdreporter_test.go b/pkg/daemon/util/cmdreporter_test.go index da1980364..d05843d07 100644 --- a/pkg/daemon/util/cmdreporter_test.go +++ b/pkg/daemon/util/cmdreporter_test.go @@ -25,7 +25,7 @@ import ( "strings" "testing" - "github.com/rook/rook/pkg/operator/k8sutil" + "github.com/rook/cassandra/pkg/operator/k8sutil" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/operator/cassandra/controller/cleanup.go b/pkg/operator/cassandra/controller/cleanup.go index e69495dbb..ebefa8cdb 100644 --- a/pkg/operator/cassandra/controller/cleanup.go +++ b/pkg/operator/cassandra/controller/cleanup.go @@ -20,8 +20,8 @@ import ( "context" "fmt" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) diff --git a/pkg/operator/cassandra/controller/cluster.go b/pkg/operator/cassandra/controller/cluster.go index 9238a21fc..bb4338a5f 100644 --- a/pkg/operator/cassandra/controller/cluster.go +++ b/pkg/operator/cassandra/controller/cluster.go @@ -20,9 +20,9 @@ import ( "context" "fmt" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/operator/cassandra/controller/cluster_test.go b/pkg/operator/cassandra/controller/cluster_test.go index d70c84428..2defea946 100644 --- a/pkg/operator/cassandra/controller/cluster_test.go +++ 
b/pkg/operator/cassandra/controller/cluster_test.go @@ -21,10 +21,10 @@ import ( "fmt" "testing" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - casstest "github.com/rook/rook/pkg/operator/cassandra/test" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" + casstest "github.com/rook/cassandra/pkg/operator/cassandra/test" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/operator/cassandra/controller/controller.go b/pkg/operator/cassandra/controller/controller.go index 3fcd6619b..76ad59312 100644 --- a/pkg/operator/cassandra/controller/controller.go +++ b/pkg/operator/cassandra/controller/controller.go @@ -23,12 +23,12 @@ import ( "github.com/coreos/pkg/capnslog" "github.com/davecgh/go-spew/spew" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - rookClientset "github.com/rook/rook/pkg/client/clientset/versioned" - rookScheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - informersv1alpha1 "github.com/rook/rook/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1" - listersv1alpha1 "github.com/rook/rook/pkg/client/listers/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + rookClientset "github.com/rook/cassandra/pkg/client/clientset/versioned" + rookScheme "github.com/rook/cassandra/pkg/client/clientset/versioned/scheme" + informersv1alpha1 "github.com/rook/cassandra/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1" + listersv1alpha1 "github.com/rook/cassandra/pkg/client/listers/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -52,7 +52,7 @@ const ( clusterQueueName = "cluster-queue" ) -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "cassandra-controller") +var logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "cassandra-controller") // ClusterController encapsulates all the tools the controller needs // in order to talk to the Kubernetes API diff --git a/pkg/operator/cassandra/controller/controller_test.go b/pkg/operator/cassandra/controller/controller_test.go index ac0cf877f..dfc28431e 100644 --- a/pkg/operator/cassandra/controller/controller_test.go +++ b/pkg/operator/cassandra/controller/controller_test.go @@ -20,9 +20,9 @@ import ( "testing" "time" - rookfake "github.com/rook/rook/pkg/client/clientset/versioned/fake" - rookScheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - rookinformers "github.com/rook/rook/pkg/client/informers/externalversions" + rookfake "github.com/rook/cassandra/pkg/client/clientset/versioned/fake" + rookScheme "github.com/rook/cassandra/pkg/client/clientset/versioned/scheme" + rookinformers "github.com/rook/cassandra/pkg/client/informers/externalversions" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" diff --git a/pkg/operator/cassandra/controller/service.go b/pkg/operator/cassandra/controller/service.go index 
11b3eb268..70fbfa4bf 100644 --- a/pkg/operator/cassandra/controller/service.go +++ b/pkg/operator/cassandra/controller/service.go @@ -20,9 +20,9 @@ import ( "context" "strings" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/operator/cassandra/controller/sync.go b/pkg/operator/cassandra/controller/sync.go index 119c4269d..6de3a36e4 100644 --- a/pkg/operator/cassandra/controller/sync.go +++ b/pkg/operator/cassandra/controller/sync.go @@ -17,8 +17,8 @@ limitations under the License. package controller import ( - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" corev1 "k8s.io/api/core/v1" ) diff --git a/pkg/operator/cassandra/controller/util/labels.go b/pkg/operator/cassandra/controller/util/labels.go index fd1b33097..43f862bc2 100644 --- a/pkg/operator/cassandra/controller/util/labels.go +++ b/pkg/operator/cassandra/controller/util/labels.go @@ -17,8 +17,8 @@ limitations under the License. package util import ( - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/labels" ) diff --git a/pkg/operator/cassandra/controller/util/patch.go b/pkg/operator/cassandra/controller/util/patch.go index 80c2f0d10..cfdb0ea75 100644 --- a/pkg/operator/cassandra/controller/util/patch.go +++ b/pkg/operator/cassandra/controller/util/patch.go @@ -20,8 +20,8 @@ import ( "context" "encoding/json" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/client/clientset/versioned" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/client/clientset/versioned" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/operator/cassandra/controller/util/resource.go b/pkg/operator/cassandra/controller/util/resource.go index f60016bc2..f5f9bb3d8 100644 --- a/pkg/operator/cassandra/controller/util/resource.go +++ b/pkg/operator/cassandra/controller/util/resource.go @@ -18,9 +18,10 @@ package util import ( "fmt" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/k8sutil" + + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" + "github.com/rook/cassandra/pkg/operator/k8sutil" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" diff --git 
a/pkg/operator/cassandra/controller/util/util.go b/pkg/operator/cassandra/controller/util/util.go index 2c6ed7b4b..a83579c6a 100644 --- a/pkg/operator/cassandra/controller/util/util.go +++ b/pkg/operator/cassandra/controller/util/util.go @@ -21,9 +21,9 @@ import ( "strconv" "strings" - cassandrarookio "github.com/rook/rook/pkg/apis/cassandra.rook.io" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" + cassandrarookio "github.com/rook/cassandra/pkg/apis/cassandra.rook.io" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/operator/cassandra/sidecar/checks.go b/pkg/operator/cassandra/sidecar/checks.go index 10874d4a3..9113fbf3f 100644 --- a/pkg/operator/cassandra/sidecar/checks.go +++ b/pkg/operator/cassandra/sidecar/checks.go @@ -18,9 +18,10 @@ package sidecar import ( "fmt" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/yanniszark/go-nodetool/nodetool" "net/http" + + "github.com/rook/cassandra/pkg/operator/cassandra/constants" + "github.com/yanniszark/go-nodetool/nodetool" ) // setupHTTPChecks brings up the liveness and readiness probes diff --git a/pkg/operator/cassandra/sidecar/config.go b/pkg/operator/cassandra/sidecar/config.go index bb8060330..d2c15bd77 100644 --- a/pkg/operator/cassandra/sidecar/config.go +++ b/pkg/operator/cassandra/sidecar/config.go @@ -26,9 +26,9 @@ import ( "time" "github.com/ghodss/yaml" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) diff --git a/pkg/operator/cassandra/sidecar/sidecar.go b/pkg/operator/cassandra/sidecar/sidecar.go index 4452f45fd..c66e98810 100644 --- a/pkg/operator/cassandra/sidecar/sidecar.go +++ b/pkg/operator/cassandra/sidecar/sidecar.go @@ -27,9 +27,9 @@ import ( "github.com/coreos/pkg/capnslog" "github.com/davecgh/go-spew/spew" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - rookClientset "github.com/rook/rook/pkg/client/clientset/versioned" - "github.com/rook/rook/pkg/operator/cassandra/constants" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + rookClientset "github.com/rook/cassandra/pkg/client/clientset/versioned" + "github.com/rook/cassandra/pkg/operator/cassandra/constants" "github.com/yanniszark/go-nodetool/nodetool" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -70,7 +70,7 @@ func New( serviceInformer coreinformers.ServiceInformer, ) (*MemberController, error) { ctx := context.TODO() - logger := capnslog.NewPackageLogger("github.com/rook/rook", "sidecar") + logger := capnslog.NewPackageLogger("github.com/rook/cassandra", "sidecar") // Get the member's service var memberService *corev1.Service var err error diff --git a/pkg/operator/cassandra/sidecar/sync.go b/pkg/operator/cassandra/sidecar/sync.go index 4e5b91bfe..7776b1109 100644 --- 
a/pkg/operator/cassandra/sidecar/sync.go +++ b/pkg/operator/cassandra/sidecar/sync.go @@ -18,10 +18,11 @@ package sidecar import ( "fmt" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" + + "github.com/rook/cassandra/pkg/operator/cassandra/constants" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" "github.com/yanniszark/go-nodetool/nodetool" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" ) func (m *MemberController) Sync(memberService *v1.Service) error { diff --git a/pkg/operator/cassandra/test/test.go b/pkg/operator/cassandra/test/test.go index a510fa9b2..feb8be62d 100644 --- a/pkg/operator/cassandra/test/test.go +++ b/pkg/operator/cassandra/test/test.go @@ -18,8 +18,9 @@ package test import ( "fmt" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" + + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/pkg/operator/cassandra/controller/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" diff --git a/pkg/operator/ceph/agent/agent.go b/pkg/operator/ceph/agent/agent.go deleted file mode 100644 index e173666b6..000000000 --- a/pkg/operator/ceph/agent/agent.go +++ /dev/null @@ -1,334 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package agent to manage Kubernetes storage attach events. 
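[Editor's note on the agent code deleted below: createAgentDaemonSet parses an AGENT_MOUNTS environment variable of the form "mountname=/host/path:/container/path,mountname2=/host/path2:/container/path2" into extra hostPath volume mounts. A small standalone sketch of that parsing is shown here; parseAgentMounts and the example mount names are illustrative only, not part of the Rook codebase or this change.]

package main

import (
	"fmt"
	"strings"
)

// parseAgentMounts mirrors the AGENT_MOUNTS parsing in the deleted agent
// daemonset code: a comma-separated list of "name=/host/path:/container/path"
// entries. The returned map is keyed by mount name; value[0] is the host
// path and value[1] is the container path.
func parseAgentMounts(raw string) (map[string][2]string, error) {
	mounts := map[string][2]string{}
	if raw == "" {
		return mounts, nil
	}
	for _, mount := range strings.Split(raw, ",") {
		def := strings.Split(mount, "=")
		if len(def) != 2 {
			return nil, fmt.Errorf("badly formatted mount %q, expected 'name=/host/path:/container/path'", mount)
		}
		paths := strings.Split(def[1], ":")
		if len(paths) != 2 {
			return nil, fmt.Errorf("badly formatted mount %q, expected 'name=/host/path:/container/path'", mount)
		}
		mounts[def[0]] = [2]string{paths[0], paths[1]}
	}
	return mounts, nil
}

func main() {
	m, err := parseAgentMounts("modules=/lib/modules:/lib/modules")
	fmt.Println(m, err) // map[modules:[/lib/modules /lib/modules]] <nil>
}
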
-package agent - -import ( - "context" - "encoding/json" - "os" - "strconv" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - k8sutil "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -const ( - agentDaemonsetName = "rook-ceph-agent" - flexvolumePathDirEnv = "FLEXVOLUME_DIR_PATH" - libModulesPathDirEnv = "LIB_MODULES_DIR_PATH" - agentMountsEnv = "AGENT_MOUNTS" - flexvolumeDefaultDirPath = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" - agentDaemonsetPriorityClassNameEnv = "AGENT_PRIORITY_CLASS_NAME" - agentDaemonsetTolerationEnv = "AGENT_TOLERATION" - agentDaemonsetTolerationKeyEnv = "AGENT_TOLERATION_KEY" - agentDaemonsetTolerationsEnv = "AGENT_TOLERATIONS" - agentDaemonsetNodeAffinityEnv = "AGENT_NODE_AFFINITY" - AgentMountSecurityModeEnv = "AGENT_MOUNT_SECURITY_MODE" - RookEnableSelinuxRelabelingEnv = "ROOK_ENABLE_SELINUX_RELABELING" - RookEnableFSGroupEnv = "ROOK_ENABLE_FSGROUP" - - // MountSecurityModeAny "any" security mode for the agent for mount action - MountSecurityModeAny = "Any" - // MountSecurityModeRestricted restricted security mode for the agent for mount action - MountSecurityModeRestricted = "Restricted" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-agent") -) - -// New creates an instance of Agent -func New(clientset kubernetes.Interface) *Agent { - return &Agent{ - clientset: clientset, - } -} - -// Start the agent -func (a *Agent) Start(namespace, agentImage, serviceAccount string) error { - err := a.createAgentDaemonSet(namespace, agentImage, serviceAccount) - if err != nil { - return errors.Wrap(err, "error starting agent daemonset") - } - return nil -} - -func (a *Agent) createAgentDaemonSet(namespace, agentImage, serviceAccount string) error { - ctx := context.TODO() - flexvolumeDirPath, source := a.discoverFlexvolumeDir() - logger.Infof("discovered flexvolume dir path from source %s. value: %s", source, flexvolumeDirPath) - - libModulesDirPath := os.Getenv(libModulesPathDirEnv) - if libModulesDirPath == "" { - libModulesDirPath = "/lib/modules" - } - agentMountSecurityMode := os.Getenv(AgentMountSecurityModeEnv) - if agentMountSecurityMode == "" { - logger.Infof("no agent mount security mode given, defaulting to '%s' mode", MountSecurityModeAny) - agentMountSecurityMode = MountSecurityModeAny - } - if agentMountSecurityMode != MountSecurityModeAny && agentMountSecurityMode != MountSecurityModeRestricted { - return errors.Errorf("invalid agent mount security mode specified (given: %s)", agentMountSecurityMode) - } - - rookEnableSelinuxRelabeling := os.Getenv(RookEnableSelinuxRelabelingEnv) - _, err := strconv.ParseBool(rookEnableSelinuxRelabeling) - if err != nil { - logger.Warningf("Invalid %s value \"%s\". Defaulting to \"true\".", RookEnableSelinuxRelabelingEnv, rookEnableSelinuxRelabeling) - rookEnableSelinuxRelabeling = "true" - } - - rookEnableFSGroup := os.Getenv(RookEnableFSGroupEnv) - _, err = strconv.ParseBool(rookEnableFSGroup) - if err != nil { - logger.Warningf("Invalid %s value \"%s\". 
Defaulting to \"true\".", RookEnableFSGroupEnv, rookEnableFSGroup) - rookEnableFSGroup = "true" - } - - privileged := true - ds := &apps.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: agentDaemonsetName, - }, - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": agentDaemonsetName, - }, - }, - UpdateStrategy: apps.DaemonSetUpdateStrategy{ - Type: apps.RollingUpdateDaemonSetStrategyType, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": agentDaemonsetName, - }, - }, - Spec: v1.PodSpec{ - ServiceAccountName: serviceAccount, - Containers: []v1.Container{ - { - Name: agentDaemonsetName, - Image: agentImage, - Args: []string{"ceph", "agent"}, - SecurityContext: &v1.SecurityContext{ - Privileged: &privileged, - }, - VolumeMounts: []v1.VolumeMount{ - { - Name: "flexvolume", - MountPath: "/flexmnt", - }, - { - Name: "dev", - MountPath: "/dev", - }, - { - Name: "sys", - MountPath: "/sys", - }, - { - Name: "libmodules", - MountPath: "/lib/modules", - }, - }, - Env: []v1.EnvVar{ - k8sutil.NamespaceEnvVar(), - k8sutil.NodeEnvVar(), - {Name: AgentMountSecurityModeEnv, Value: agentMountSecurityMode}, - {Name: RookEnableSelinuxRelabelingEnv, Value: rookEnableSelinuxRelabeling}, - {Name: RookEnableFSGroupEnv, Value: rookEnableFSGroup}, - }, - }, - }, - Volumes: []v1.Volume{ - { - Name: "flexvolume", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: flexvolumeDirPath, - }, - }, - }, - { - Name: "dev", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: "/dev", - }, - }, - }, - { - Name: "sys", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: "/sys", - }, - }, - }, - { - Name: "libmodules", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: libModulesDirPath, - }, - }, - }, - }, - HostNetwork: true, - PriorityClassName: os.Getenv(agentDaemonsetPriorityClassNameEnv), - }, - }, - }, - } - - // Add agent mounts if any given through environment - agentMounts := os.Getenv(agentMountsEnv) - if agentMounts != "" { - mounts := strings.Split(agentMounts, ",") - for _, mount := range mounts { - mountdef := strings.Split(mount, "=") - if len(mountdef) != 2 { - return errors.Errorf("badly formatted AGENT_MOUNTS %q. The format should be 'mountname=/host/path:/container/path,mountname2=/host/path2:/container/path2'", agentMounts) - } - mountname := mountdef[0] - paths := strings.Split(mountdef[1], ":") - if len(paths) != 2 { - return errors.Errorf("badly formatted AGENT_MOUNTS %q. 
The format should be 'mountname=/host/path:/container/path,mountname2=/host/path2:/container/path2'", agentMounts) - } - ds.Spec.Template.Spec.Containers[0].VolumeMounts = append(ds.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{ - Name: mountname, - MountPath: paths[1], - }) - ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, v1.Volume{ - Name: mountname, - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: paths[0], - }, - }, - }) - } - } - - // Add toleration if any - tolerationValue := os.Getenv(agentDaemonsetTolerationEnv) - if tolerationValue != "" { - ds.Spec.Template.Spec.Tolerations = []v1.Toleration{ - { - Effect: v1.TaintEffect(tolerationValue), - Operator: v1.TolerationOpExists, - Key: os.Getenv(agentDaemonsetTolerationKeyEnv), - }, - } - } - - tolerationsRaw := os.Getenv(agentDaemonsetTolerationsEnv) - tolerations, err := k8sutil.YamlToTolerations(tolerationsRaw) - if err != nil { - logger.Warningf("failed to parse %q. %v", tolerationsRaw, err) - } - ds.Spec.Template.Spec.Tolerations = append(ds.Spec.Template.Spec.Tolerations, tolerations...) - - // Add NodeAffinity if any - nodeAffinity := os.Getenv(agentDaemonsetNodeAffinityEnv) - if nodeAffinity != "" { - v1NodeAffinity, err := k8sutil.GenerateNodeAffinity(nodeAffinity) - if err != nil { - logger.Errorf("failed to create NodeAffinity. %v", err) - } else { - ds.Spec.Template.Spec.Affinity = &v1.Affinity{ - NodeAffinity: v1NodeAffinity, - } - } - } - - _, err = a.clientset.AppsV1().DaemonSets(namespace).Create(ctx, ds, metav1.CreateOptions{}) - if err != nil { - if !k8serrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create rook-ceph-agent daemon set") - } - logger.Infof("rook-ceph-agent daemonset already exists, updating ...") - _, err = a.clientset.AppsV1().DaemonSets(namespace).Update(ctx, ds, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrap(err, "failed to update rook-ceph-agent daemon set") - } - } else { - logger.Infof("rook-ceph-agent daemonset started") - } - return nil - -} - -func (a *Agent) discoverFlexvolumeDir() (flexvolumeDirPath, source string) { - ctx := context.TODO() - //copy flexvolume to flexvolume dir - nodeName := os.Getenv(k8sutil.NodeNameEnvVar) - if nodeName == "" { - logger.Warningf("cannot detect the node name. Please provide using the downward API in the rook operator manifest file") - return getDefaultFlexvolumeDir() - } - - // determining where the path of the flexvolume dir on the node - nodeConfigURI, err := k8sutil.NodeConfigURI() - if err != nil { - logger.Warning(err.Error()) - return getDefaultFlexvolumeDir() - } - nodeConfig, err := a.clientset.CoreV1().RESTClient().Get().RequestURI(nodeConfigURI).DoRaw(ctx) - if err != nil { - logger.Warningf("unable to query node configuration: %v", err) - return getDefaultFlexvolumeDir() - } - - // unmarshal to a KubeletConfiguration - kubeletConfiguration := KubeletConfiguration{} - if err := json.Unmarshal(nodeConfig, &kubeletConfiguration); err != nil { - logger.Warningf("unable to parse node config as kubelet configuration. 
%v", err) - } else { - flexvolumeDirPath = kubeletConfiguration.KubeletConfig.VolumePluginDir - } - - if flexvolumeDirPath != "" { - return flexvolumeDirPath, "KubeletConfiguration" - } - - return getDefaultFlexvolumeDir() -} - -func getDefaultFlexvolumeDir() (flexvolumeDirPath, source string) { - logger.Infof("getting flexvolume dir path from %s env var", flexvolumePathDirEnv) - flexvolumeDirPath = os.Getenv(flexvolumePathDirEnv) - if flexvolumeDirPath != "" { - return flexvolumeDirPath, "env var" - } - - logger.Infof("flexvolume dir path env var %s is not provided. Defaulting to: %s", - flexvolumePathDirEnv, flexvolumeDefaultDirPath) - flexvolumeDirPath = flexvolumeDefaultDirPath - - return flexvolumeDirPath, "default" -} diff --git a/pkg/operator/ceph/agent/agent_test.go b/pkg/operator/ceph/agent/agent_test.go deleted file mode 100644 index e9d2a0693..000000000 --- a/pkg/operator/ceph/agent/agent_test.go +++ /dev/null @@ -1,210 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package agent to manage Kubernetes storage attach events. -package agent - -import ( - "context" - "os" - "testing" - - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestStartAgentDaemonset(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.PodNameEnvVar, "rook-operator") - defer os.Unsetenv(k8sutil.PodNameEnvVar) - - os.Setenv(agentDaemonsetPriorityClassNameEnv, "my-priority-class") - defer os.Unsetenv(agentDaemonsetPriorityClassNameEnv) - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-operator", - Namespace: "rook-system", - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "mypodContainer", - Image: "rook/test", - }, - }, - }, - } - _, err := clientset.CoreV1().Pods("rook-system").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - namespace := "ns" - a := New(clientset) - - // start a basic cluster - err = a.Start(namespace, "rook/rook:myversion", "mysa") - assert.Nil(t, err) - - // check daemonset parameters - agentDS, err := clientset.AppsV1().DaemonSets(namespace).Get(ctx, "rook-ceph-agent", metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, namespace, agentDS.Namespace) - assert.Equal(t, "rook-ceph-agent", agentDS.Name) - assert.Equal(t, "my-priority-class", agentDS.Spec.Template.Spec.PriorityClassName) - assert.Equal(t, "mysa", agentDS.Spec.Template.Spec.ServiceAccountName) - assert.True(t, *agentDS.Spec.Template.Spec.Containers[0].SecurityContext.Privileged) - volumes := agentDS.Spec.Template.Spec.Volumes - assert.Equal(t, 4, len(volumes)) - volumeMounts := agentDS.Spec.Template.Spec.Containers[0].VolumeMounts - assert.Equal(t, 4, len(volumeMounts)) - envs := 
agentDS.Spec.Template.Spec.Containers[0].Env - assert.Equal(t, 5, len(envs)) - image := agentDS.Spec.Template.Spec.Containers[0].Image - assert.Equal(t, "rook/rook:myversion", image) - assert.Nil(t, agentDS.Spec.Template.Spec.Tolerations) -} - -func TestGetContainerImage(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "Default") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.PodNameEnvVar, "mypod") - defer os.Unsetenv(k8sutil.PodNameEnvVar) - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mypod", - Namespace: "Default", - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "mypodContainer", - Image: "rook/test", - }, - }, - }, - } - _, err := clientset.CoreV1().Pods("Default").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - // start a basic cluster - returnPod, err := k8sutil.GetRunningPod(clientset) - assert.Nil(t, err) - assert.Equal(t, "mypod", returnPod.Name) -} - -func TestGetContainerImageMultipleContainers(t *testing.T) { - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mypod", - Namespace: "Default", - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "mypodContainer", - Image: "rook/test", - }, - { - Name: "otherPodContainer", - Image: "rook/test2", - }, - }, - }, - } - - // start a basic cluster - container, err := k8sutil.GetContainerImage(&pod, "foo") - assert.NotNil(t, err) - assert.Equal(t, "", container) - assert.Equal(t, "failed to find image for container foo", err.Error()) -} - -func TestStartAgentDaemonsetWithToleration(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.PodNameEnvVar, "rook-operator") - defer os.Unsetenv(k8sutil.PodNameEnvVar) - - os.Setenv(agentDaemonsetTolerationEnv, "NoSchedule") - defer os.Unsetenv(agentDaemonsetTolerationEnv) - - os.Setenv(agentDaemonsetTolerationKeyEnv, "example") - defer os.Unsetenv(agentDaemonsetTolerationKeyEnv) - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-operator", - Namespace: "rook-system", - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "mypodContainer", - Image: "rook/test", - }, - }, - }, - } - _, err := clientset.CoreV1().Pods("rook-system").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - namespace := "ns" - a := New(clientset) - - // start a basic cluster - err = a.Start(namespace, "rook/test", "mysa") - assert.Nil(t, err) - - // check daemonset toleration - agentDS, err := clientset.AppsV1().DaemonSets(namespace).Get(ctx, "rook-ceph-agent", metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, 1, len(agentDS.Spec.Template.Spec.Tolerations)) - assert.Equal(t, "mysa", agentDS.Spec.Template.Spec.ServiceAccountName) - assert.Equal(t, "NoSchedule", string(agentDS.Spec.Template.Spec.Tolerations[0].Effect)) - assert.Equal(t, "example", string(agentDS.Spec.Template.Spec.Tolerations[0].Key)) - assert.Equal(t, "Exists", string(agentDS.Spec.Template.Spec.Tolerations[0].Operator)) -} - -func TestDiscoverFlexDir(t *testing.T) { - path, source := getDefaultFlexvolumeDir() - assert.Equal(t, "default", source) - assert.Equal(t, "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", path) - - os.Setenv(flexvolumePathDirEnv, "/my/flex/path/") - defer os.Unsetenv(flexvolumePathDirEnv) - path, source = getDefaultFlexvolumeDir() - assert.Equal(t, "env var", source) - 
assert.Equal(t, "/my/flex/path/", path) -} diff --git a/pkg/operator/ceph/agent/types.go b/pkg/operator/ceph/agent/types.go deleted file mode 100644 index 2d787e70b..000000000 --- a/pkg/operator/ceph/agent/types.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package agent to manage Kubernetes storage attach events. -package agent - -import ( - "k8s.io/client-go/kubernetes" - componentconfig "k8s.io/kube-controller-manager/config/v1alpha1" -) - -// Agent reference to be deployed -type Agent struct { - clientset kubernetes.Interface -} - -// NodeConfigControllerManager is a reference of all the configuration for the K8S node from the controllermanager -type NodeConfigControllerManager struct { - ComponentConfig componentconfig.KubeControllerManagerConfiguration `json:"componentconfig"` -} - -// KubeletConfiguration represents the response from the node config URI (configz) in Kubernetes 1.8+ -type KubeletConfiguration struct { - KubeletConfig struct { - VolumePluginDir string `json:"volumePluginDir"` - } `json:"kubeletconfig"` -} diff --git a/pkg/operator/ceph/client/controller.go b/pkg/operator/ceph/client/controller.go deleted file mode 100644 index 948a008f7..000000000 --- a/pkg/operator/ceph/client/controller.go +++ /dev/null @@ -1,374 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package client to manage a rook client. 
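
The KubeletConfiguration type deleted just above existed only to pull volumePluginDir out of the kubelet's node config (configz) response. A self-contained sketch of that parsing pattern, using an illustrative payload rather than a live node query:

package main

import (
    "encoding/json"
    "fmt"
)

// kubeletConfiguration mirrors the shape of the removed type: only the
// volumePluginDir field of the kubelet config is of interest.
type kubeletConfiguration struct {
    KubeletConfig struct {
        VolumePluginDir string `json:"volumePluginDir"`
    } `json:"kubeletconfig"`
}

func main() {
    // Illustrative payload; the removed agent fetched the real one from the node's configz URI.
    raw := []byte(`{"kubeletconfig":{"volumePluginDir":"/var/lib/kubelet/volumeplugins"}}`)

    var cfg kubeletConfiguration
    if err := json.Unmarshal(raw, &cfg); err != nil {
        fmt.Println("could not parse kubelet config, falling back to defaults:", err)
        return
    }
    fmt.Println("discovered flexvolume dir:", cfg.KubeletConfig.VolumePluginDir)
}
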
-package client - -import ( - "context" - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/pkg/errors" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -const ( - controllerName = "ceph-client-controller" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -var cephClientKind = reflect.TypeOf(cephv1.CephClient{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephClientKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileCephClient reconciles a CephClient object -type ReconcileCephClient struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo -} - -// Add creates a new CephClient Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - - return &ReconcileCephClient{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephClient CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephClient{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - // Watch secrets - err = c.Watch(&source.Kind{Type: &v1.Secret{TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: v1.SchemeGroupVersion.String()}}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephClient{}, - }, opcontroller.WatchPredicateForNonCRDObject(&cephv1.CephClient{TypeMeta: controllerTypeMeta}, mgr.GetScheme())) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephClient object and makes changes based on the state read -// and what is in the CephClient.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileCephClient) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - logger.Errorf("failed to reconcile %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileCephClient) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the CephClient instance - cephClient := &cephv1.CephClient{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephClient) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("cephClient resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. 
- return reconcile.Result{}, errors.Wrap(err, "failed to get cephClient") - } - - // Set a finalizer so we can do cleanup before the object goes away - err = opcontroller.AddFinalizerIfNotPresent(r.client, cephClient) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer") - } - - // The CR was just created, initializing status fields - if cephClient.Status == nil { - updateStatus(r.client, request.NamespacedName, cephv1.ConditionProgressing) - } - - // Make sure a CephCluster is present otherwise do nothing - _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - // This handles the case where the Ceph Cluster is gone and we want to delete that CR - // We skip the deletePool() function since everything is gone already - // - // Also, only remove the finalizer if the CephCluster is gone - // If not, we should wait for it to be ready - // This handles the case where the operator is not ready to accept Ceph command but the cluster exists - if !cephClient.GetDeletionTimestamp().IsZero() && !cephClusterExists { - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephClient) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, nil - } - return reconcileResponse, nil - } - - // Populate clusterInfo during each reconcile - r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info") - } - - // DELETE: the CR was deleted - if !cephClient.GetDeletionTimestamp().IsZero() { - logger.Debugf("deleting pool %q", cephClient.Name) - err := r.deleteClient(cephClient) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to delete ceph client %q", cephClient.Name) - } - - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephClient) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, nil - } - - // validate the client settings - err = ValidateClient(r.context, cephClient) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to validate client %q arguments", cephClient.Name) - } - - // Create or Update client - err = r.createOrUpdateClient(cephClient) - if err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil - } - updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure) - return reconcile.Result{}, errors.Wrapf(err, "failed to create or update client %q", cephClient.Name) - } - - // Success! 
Let's update the status - updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady) - - // Return and do not requeue - logger.Debug("done reconciling") - return reconcile.Result{}, nil -} - -// Create the client -func (r *ReconcileCephClient) createOrUpdateClient(cephClient *cephv1.CephClient) error { - ctx := context.TODO() - logger.Infof("creating client %s in namespace %s", cephClient.Name, cephClient.Namespace) - - // Generate the CephX details - clientEntity, caps := genClientEntity(cephClient) - - // Check if client was created manually, create if necessary or update caps and create secret - key, err := cephclient.AuthGetKey(r.context, r.clusterInfo, clientEntity) - if err != nil { - key, err = cephclient.AuthGetOrCreateKey(r.context, r.clusterInfo, clientEntity, caps) - if err != nil { - return errors.Wrapf(err, "failed to create client %q", cephClient.Name) - } - } else { - err = cephclient.AuthUpdateCaps(r.context, r.clusterInfo, clientEntity, caps) - if err != nil { - return errors.Wrapf(err, "client %q exists, failed to update client caps", cephClient.Name) - } - } - - // Generate Kubernetes Secret - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: generateCephUserSecretName(cephClient), - Namespace: cephClient.Namespace, - }, - StringData: map[string]string{ - cephClient.Name: key, - }, - Type: k8sutil.RookType, - } - - // Set CephClient owner ref to the Secret - err = controllerutil.SetControllerReference(cephClient, secret, r.scheme) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to ceph client secret %q", secret.Name) - } - - // Create or Update Kubernetes Secret - _, err = r.context.Clientset.CoreV1().Secrets(cephClient.Namespace).Get(ctx, secret.Name, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debugf("creating secret for %q", secret.Name) - if _, err := r.context.Clientset.CoreV1().Secrets(cephClient.Namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil { - return errors.Wrapf(err, "failed to create secret for %q", secret.Name) - } - logger.Infof("created client %q", cephClient.Name) - return nil - } - return errors.Wrapf(err, "failed to get secret for %q", secret.Name) - } - logger.Debugf("updating secret for %s", secret.Name) - _, err = r.context.Clientset.CoreV1().Secrets(cephClient.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to update secret for %q", secret.Name) - } - - logger.Infof("updated client %q", cephClient.Name) - return nil -} - -// Delete the client -func (r *ReconcileCephClient) deleteClient(cephClient *cephv1.CephClient) error { - logger.Infof("deleting client object %q", cephClient.Name) - if err := cephclient.AuthDelete(r.context, r.clusterInfo, generateClientName(cephClient.Name)); err != nil { - return errors.Wrapf(err, "failed to delete client %q", cephClient.Name) - } - - logger.Infof("deleted client %q", cephClient.Name) - return nil -} - -// ValidateClient the client arguments -func ValidateClient(context *clusterd.Context, cephClient *cephv1.CephClient) error { - // Validate name - if cephClient.Name == "" { - return errors.New("missing name") - } - reservedNames := regexp.MustCompile("^admin$|^rgw.*$|^rbd-mirror$|^osd.[0-9]*$|^bootstrap-(mds|mgr|mon|osd|rgw|^rbd-mirror)$") - if reservedNames.Match([]byte(cephClient.Name)) { - return errors.Errorf("ignoring reserved name %q", cephClient.Name) - } - - // Validate Spec - if cephClient.Spec.Caps == nil { - return errors.New("no caps 
specified") - } - for _, cap := range cephClient.Spec.Caps { - if cap == "" { - return errors.New("no caps specified") - } - } - - return nil -} - -func genClientEntity(cephClient *cephv1.CephClient) (string, []string) { - caps := []string{} - for name, cap := range cephClient.Spec.Caps { - caps = append(caps, name, cap) - } - - return generateClientName(cephClient.Name), caps -} - -func generateClientName(name string) string { - return fmt.Sprintf("client.%s", name) -} - -// updateStatus updates an object with a given status -func updateStatus(client client.Client, name types.NamespacedName, status cephv1.ConditionType) { - cephClient := &cephv1.CephClient{} - if err := client.Get(context.TODO(), name, cephClient); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephClient resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve ceph client %q to update status to %q. %v", name, status, err) - return - } - if cephClient.Status == nil { - cephClient.Status = &cephv1.CephClientStatus{} - } - - cephClient.Status.Phase = status - if cephClient.Status.Phase == cephv1.ConditionReady { - cephClient.Status.Info = generateStatusInfo(cephClient) - } - if err := reporting.UpdateStatus(client, cephClient); err != nil { - logger.Errorf("failed to set ceph client %q status to %q. %v", name, status, err) - return - } - logger.Debugf("ceph client %q status updated to %q", name, status) -} - -func generateStatusInfo(client *cephv1.CephClient) map[string]string { - m := make(map[string]string) - m["secretName"] = generateCephUserSecretName(client) - return m -} - -func generateCephUserSecretName(client *cephv1.CephClient) string { - return fmt.Sprintf("rook-ceph-client-%s", client.Name) -} diff --git a/pkg/operator/ceph/client/controller_test.go b/pkg/operator/ceph/client/controller_test.go deleted file mode 100644 index f90bc913e..000000000 --- a/pkg/operator/ceph/client/controller_test.go +++ /dev/null @@ -1,312 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "bytes" - "context" - "os" - "strings" - "testing" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "github.com/tevino/abool" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func TestValidateClient(t *testing.T) { - context := &clusterd.Context{Executor: &exectest.MockExecutor{}} - - // must specify caps - p := cephv1.CephClient{ObjectMeta: metav1.ObjectMeta{Name: "client1", Namespace: "myns"}} - err := ValidateClient(context, &p) - assert.NotNil(t, err) - - // must specify name - p = cephv1.CephClient{ObjectMeta: metav1.ObjectMeta{Namespace: "myns"}} - err = ValidateClient(context, &p) - assert.NotNil(t, err) - - // must specify namespace - p = cephv1.CephClient{ObjectMeta: metav1.ObjectMeta{Name: "client1"}} - err = ValidateClient(context, &p) - assert.NotNil(t, err) - - // succeed with caps properly defined - p = cephv1.CephClient{ObjectMeta: metav1.ObjectMeta{Name: "client1", Namespace: "myns"}} - p.Spec.Caps = map[string]string{ - "osd": "allow *", - "mon": "allow *", - "mds": "allow *", - } - err = ValidateClient(context, &p) - assert.Nil(t, err) -} - -func TestGenerateClient(t *testing.T) { - p := &cephv1.CephClient{ObjectMeta: metav1.ObjectMeta{Name: "client1", Namespace: "myns"}, - Spec: cephv1.ClientSpec{ - Caps: map[string]string{ - "osd": "allow *", - "mon": "allow rw", - "mds": "allow rwx", - }, - }, - } - - client, caps := genClientEntity(p) - equal := bytes.Compare([]byte(client), []byte("client.client1")) - var res bool = equal == 0 - assert.True(t, res) - assert.True(t, strings.Contains(strings.Join(caps, " "), "osd allow *")) - assert.True(t, strings.Contains(strings.Join(caps, " "), "mon allow rw")) - assert.True(t, strings.Contains(strings.Join(caps, " "), "mds allow rwx")) - - // Fail if caps are empty - p2 := &cephv1.CephClient{ObjectMeta: metav1.ObjectMeta{Name: "client2", Namespace: "myns"}, - Spec: cephv1.ClientSpec{ - Caps: map[string]string{ - "osd": "", - "mon": "", - }, - }, - } - - client, _ = genClientEntity(p2) - equal = bytes.Compare([]byte(client), []byte("client.client2")) - res = equal == 0 - assert.True(t, res) -} - -func TestCephClientController(t *testing.T) { - ctx := context.TODO() - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - logger.Info("RUN 1") - var ( - name = "my-client" - namespace = "rook-ceph" - ) - - // A Pool resource with metadata and spec. - cephClient := &cephv1.CephClient{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - UID: types.UID("c47cac40-9bee-4d52-823b-ccd803ba5bfe"), - }, - Spec: cephv1.ClientSpec{ - Caps: map[string]string{ - "osd": "allow *", - "mon": "allow *", - }, - }, - Status: &cephv1.CephClientStatus{ - Phase: "", - }, - } - - // Objects to track in the fake client. 
- object := []runtime.Object{ - cephClient, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - - return "", nil - }, - } - c := &clusterd.Context{ - Executor: executor, - Clientset: testop.New(t, 1), - RookClientset: rookclient.NewSimpleClientset(), - RequestCancelOrchestration: abool.New(), - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephClient{}, &cephv1.CephClusterList{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileCephClient object with the scheme and fake client. - r := &ReconcileCephClient{ - client: cl, - scheme: s, - context: c, - } - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - logger.Info("RUN 2") - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephVersion: &cephv1.ClusterVersion{ - Version: "14.2.9-0", - }, - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}, &cephv1.CephClusterList{}) - - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephClient object with the scheme and fake client. - r = &ReconcileCephClient{ - client: cl, - scheme: s, - context: c, - } - assert.True(t, res.Requeue) - - // - // TEST 3: - // - // SUCCESS! The CephCluster is ready - // - logger.Info("RUN 3") - cephCluster.Status.Phase = cephv1.ConditionReady - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - objects := []runtime.Object{ - cephClient, - cephCluster, - } - // Create a fake client to mock API calls. 
- cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() - c.Client = cl - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return `{"key":"AQCvzWBeIV9lFRAAninzm+8XFxbSfTiPwoX50g=="}`, nil - } - - return "", nil - }, - } - c.Executor = executor - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephBlockPoolList{}) - // Create a ReconcileCephClient object with the scheme and fake client. - r = &ReconcileCephClient{ - client: cl, - scheme: s, - context: c, - } - - r = &ReconcileCephClient{ - client: cl, - scheme: s, - context: c, - } - - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - - err = r.client.Get(context.TODO(), req.NamespacedName, cephClient) - assert.NoError(t, err) - assert.Equal(t, cephv1.ConditionReady, cephClient.Status.Phase) - assert.NotEmpty(t, cephClient.Status.Info["secretName"], cephClient.Status.Info) - cephClientSecret, err := c.Clientset.CoreV1().Secrets(namespace).Get(ctx, cephClient.Status.Info["secretName"], metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotEmpty(t, cephClientSecret.StringData) -} - -func TestBuildUpdateStatusInfo(t *testing.T) { - cephClient := &cephv1.CephClient{ - ObjectMeta: metav1.ObjectMeta{ - Name: "client-ocp", - }, - Spec: cephv1.ClientSpec{}, - } - - statusInfo := generateStatusInfo(cephClient) - assert.NotEmpty(t, statusInfo["secretName"]) - assert.Equal(t, "rook-ceph-client-client-ocp", statusInfo["secretName"]) -} diff --git a/pkg/operator/ceph/cluster/cephstatus.go b/pkg/operator/ceph/cluster/cephstatus.go deleted file mode 100644 index acec9a1e0..000000000 --- a/pkg/operator/ceph/cluster/cephstatus.go +++ /dev/null @@ -1,341 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage Kubernetes storage. 
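
The cephstatus.go file removed here drives a periodic health check: checkCephStatus runs one check immediately, then repeats on a configurable interval until a stop channel fires. A generic, self-contained sketch of that loop shape (names are illustrative, not the removed API):

package main

import (
    "fmt"
    "time"
)

// pollUntilStopped runs check once up front, then on every interval tick,
// and returns when stopCh is closed, the same shape as the removed
// checkCephStatus loop.
func pollUntilStopped(interval time.Duration, stopCh <-chan struct{}, check func()) {
    check()
    for {
        select {
        case <-stopCh:
            fmt.Println("stopping status checks")
            return
        case <-time.After(interval):
            check()
        }
    }
}

func main() {
    stop := make(chan struct{})
    go pollUntilStopped(20*time.Millisecond, stop, func() { fmt.Println("checking cluster status") })
    time.Sleep(70 * time.Millisecond)
    close(stop)
    time.Sleep(10 * time.Millisecond)
}
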
-package cluster - -import ( - "context" - "fmt" - "os" - "strings" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -var ( - // defaultStatusCheckInterval is the interval to check the status of the ceph cluster - defaultStatusCheckInterval = 60 * time.Second -) - -// cephStatusChecker aggregates the mon/cluster info needed to check the health of the monitors -type cephStatusChecker struct { - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - interval *time.Duration - client client.Client - isExternal bool -} - -// newCephStatusChecker creates a new HealthChecker object -func newCephStatusChecker(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec) *cephStatusChecker { - c := &cephStatusChecker{ - context: context, - clusterInfo: clusterInfo, - interval: &defaultStatusCheckInterval, - client: context.Client, - isExternal: clusterSpec.External.Enable, - } - - // allow overriding the check interval with an env var on the operator - // Keep the existing behavior - var checkInterval *time.Duration - checkIntervalCRSetting := clusterSpec.HealthCheck.DaemonHealth.Status.Interval - checkIntervalEnv := os.Getenv("ROOK_CEPH_STATUS_CHECK_INTERVAL") - if checkIntervalEnv != "" { - if duration, err := time.ParseDuration(checkIntervalEnv); err == nil { - checkInterval = &duration - } - } else if checkIntervalCRSetting != nil { - checkInterval = &checkIntervalCRSetting.Duration - } - if checkInterval != nil { - logger.Infof("ceph status check interval is %s", checkInterval.String()) - c.interval = checkInterval - } - - return c -} - -// checkCephStatus periodically checks the health of the cluster -func (c *cephStatusChecker) checkCephStatus(stopCh chan struct{}) { - // check the status immediately before starting the loop - c.checkStatus() - - for { - select { - case <-stopCh: - logger.Infof("stopping monitoring of ceph status") - return - - case <-time.After(*c.interval): - c.checkStatus() - } - } -} - -// checkStatus queries the status of ceph health then updates the CR status -func (c *cephStatusChecker) checkStatus() { - var status cephclient.CephStatus - var err error - - logger.Debugf("checking health of cluster") - - condition := cephv1.ConditionReady - reason := cephv1.ClusterCreatedReason - if c.isExternal { - condition = cephv1.ConditionConnected - reason = cephv1.ClusterConnectedReason - } - - // Check ceph's status - status, err = cephclient.StatusWithUser(c.context, c.clusterInfo) - if err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info("skipping ceph status since operator is still initializing") - return - } - logger.Errorf("failed to get ceph status. 
%v", err) - - message := "Failed to configure ceph cluster" - if c.isExternal { - message = "Failed to configure external ceph cluster" - } - status := cephStatusOnError(err.Error()) - c.updateCephStatus(status, condition, reason, message, v1.ConditionFalse) - return - } - - logger.Debugf("cluster status: %+v", status) - message := "Cluster created successfully" - if c.isExternal { - message = "Cluster connected successfully" - } - c.updateCephStatus(&status, condition, reason, message, v1.ConditionTrue) - - if status.Health.Status != "HEALTH_OK" { - logger.Debug("checking for stuck pods on not ready nodes") - if err := c.forceDeleteStuckRookPodsOnNotReadyNodes(); err != nil { - logger.Errorf("failed to delete pod on not ready nodes. %v", err) - } - } - - c.configureHealthSettings(status) -} - -func (c *cephStatusChecker) configureHealthSettings(status cephclient.CephStatus) { - // loop through the health codes and log what we find - for healthCode, check := range status.Health.Checks { - logger.Debugf("Health: %q, code: %q, message: %q", check.Severity, healthCode, check.Summary.Message) - } - - // disable the insecure global id if there are no old clients - if _, ok := status.Health.Checks["AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED"]; ok { - if _, ok := status.Health.Checks["AUTH_INSECURE_GLOBAL_ID_RECLAIM"]; !ok { - logger.Info("Disabling the insecure global ID as no legacy clients are currently connected. If you still require the insecure connections, see the CVE to suppress the health warning and re-enable the insecure connections. https://docs.ceph.com/en/latest/security/CVE-2021-20288/") - monStore := config.GetMonStore(c.context, c.clusterInfo) - if err := monStore.Set("mon", "auth_allow_insecure_global_id_reclaim", "false"); err != nil { - logger.Warningf("failed to disable the insecure global ID. %v", err) - } else { - logger.Info("insecure global ID is now disabled") - } - } else { - logger.Warning("insecure clients are connected to the cluster, to resolve the AUTH_INSECURE_GLOBAL_ID_RECLAIM health warning please refer to the upgrade guide to ensure all Ceph daemons are updated.") - } - } -} - -// updateStatus updates an object with a given status -func (c *cephStatusChecker) updateCephStatus(status *cephclient.CephStatus, condition cephv1.ConditionType, reason cephv1.ConditionReason, message string, conditionStatus v1.ConditionStatus) { - clusterName := c.clusterInfo.NamespacedName() - cephCluster, err := c.context.RookClientset.CephV1().CephClusters(clusterName.Namespace).Get(context.TODO(), clusterName.Name, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephCluster resource not found. Ignoring since object must be deleted.") - return - } - logger.Errorf("failed to retrieve ceph cluster %q in namespace %q to update status to %+v", clusterName.Name, clusterName.Namespace, status) - return - } - - // Update with Ceph Status - cephCluster.Status.CephStatus = toCustomResourceStatus(cephCluster.Status, status) - - // versions store the ceph version of all the ceph daemons and overall cluster version - versions, err := cephclient.GetAllCephDaemonVersions(c.context, c.clusterInfo) - if err != nil { - logger.Errorf("failed to get ceph daemons versions. 
%v", err) - } else { - // Update status with Ceph versions - cephCluster.Status.CephStatus.Versions = versions - } - - // Update condition - logger.Debugf("updating ceph cluster %q status and condition to %+v, %v, %s, %s", clusterName.Namespace, status, conditionStatus, reason, message) - opcontroller.UpdateClusterCondition(c.context, cephCluster, c.clusterInfo.NamespacedName(), condition, conditionStatus, reason, message, true) -} - -// toCustomResourceStatus converts the ceph status to the struct expected for the CephCluster CR status -func toCustomResourceStatus(currentStatus cephv1.ClusterStatus, newStatus *cephclient.CephStatus) *cephv1.CephStatus { - s := &cephv1.CephStatus{ - Health: newStatus.Health.Status, - LastChecked: formatTime(time.Now().UTC()), - Details: make(map[string]cephv1.CephHealthMessage), - } - for name, message := range newStatus.Health.Checks { - s.Details[name] = cephv1.CephHealthMessage{ - Severity: message.Severity, - Message: message.Summary.Message, - } - } - - if newStatus.PgMap.TotalBytes != 0 { - s.Capacity.TotalBytes = newStatus.PgMap.TotalBytes - s.Capacity.UsedBytes = newStatus.PgMap.UsedBytes - s.Capacity.AvailableBytes = newStatus.PgMap.AvailableBytes - s.Capacity.LastUpdated = formatTime(time.Now().UTC()) - } - - if currentStatus.CephStatus != nil { - s.PreviousHealth = currentStatus.CephStatus.PreviousHealth - s.LastChanged = currentStatus.CephStatus.LastChanged - if currentStatus.CephStatus.Health != s.Health { - s.PreviousHealth = currentStatus.CephStatus.Health - s.LastChanged = s.LastChecked - } - if newStatus.PgMap.TotalBytes == 0 { - s.Capacity = currentStatus.CephStatus.Capacity - } - } - return s -} - -func formatTime(t time.Time) string { - return t.Format(time.RFC3339) -} - -func (c *ClusterController) updateClusterCephVersion(image string, cephVersion cephver.CephVersion) { - ctx := context.TODO() - logger.Infof("cluster %q: version %q detected for image %q", c.namespacedName.Namespace, cephVersion.String(), image) - - cephCluster, err := c.context.RookClientset.CephV1().CephClusters(c.namespacedName.Namespace).Get(ctx, c.namespacedName.Name, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephCluster resource not found. Ignoring since object must be deleted.") - return - } - logger.Errorf("failed to retrieve ceph cluster %q to update ceph version to %+v. %v", c.namespacedName.Name, cephVersion, err) - return - } - - cephClusterVersion := &cephv1.ClusterVersion{ - Image: image, - Version: opcontroller.GetCephVersionLabel(cephVersion), - } - // update the Ceph version on the retrieved cluster object - // do not overwrite the ceph status that is updated in a separate goroutine - cephCluster.Status.CephVersion = cephClusterVersion - if err := reporting.UpdateStatus(c.client, cephCluster); err != nil { - logger.Errorf("failed to update cluster %q version. %v", c.namespacedName.Name, err) - return - } -} - -func cephStatusOnError(errorMessage string) *cephclient.CephStatus { - details := make(map[string]cephclient.CheckMessage) - details["error"] = cephclient.CheckMessage{ - Severity: "Urgent", - Summary: cephclient.Summary{ - Message: errorMessage, - }, - } - - return &cephclient.CephStatus{ - Health: cephclient.HealthStatus{ - Status: "HEALTH_ERR", - Checks: details, - }, - } -} - -// forceDeleteStuckPodsOnNotReadyNodes lists all the nodes that are in NotReady state and -// gets all the pods on the failed node and force delete the pods stuck in terminating state. 
-func (c *cephStatusChecker) forceDeleteStuckRookPodsOnNotReadyNodes() error { - nodes, err := k8sutil.GetNotReadyKubernetesNodes(c.context.Clientset) - if err != nil { - return errors.Wrap(err, "failed to get NotReady nodes") - } - for _, node := range nodes { - pods, err := c.getRookPodsOnNode(node.Name) - if err != nil { - logger.Errorf("failed to get pods on NotReady node %q. %v", node.Name, err) - } - for _, pod := range pods { - if err := k8sutil.ForceDeletePodIfStuck(c.context, pod); err != nil { - logger.Warningf("skipping forced delete of stuck pod %q. %v", pod.Name, err) - } - } - } - return nil -} - -func (c *cephStatusChecker) getRookPodsOnNode(node string) ([]v1.Pod, error) { - clusterName := c.clusterInfo.NamespacedName() - appLabels := []string{ - "csi-rbdplugin-provisioner", - "csi-rbdplugin", - "csi-cephfsplugin-provisioner", - "csi-cephfsplugin", - "rook-ceph-operator", - "rook-ceph-mon", - "rook-ceph-osd", - "rook-ceph-crashcollector", - "rook-ceph-mgr", - "rook-ceph-mds", - "rook-ceph-rgw", - } - podsOnNode := []v1.Pod{} - listOpts := metav1.ListOptions{ - FieldSelector: fmt.Sprintf("spec.nodeName=%s", node), - } - pods, err := c.context.Clientset.CoreV1().Pods(clusterName.Namespace).List(context.TODO(), listOpts) - if err != nil { - return podsOnNode, errors.Wrapf(err, "failed to get pods on node %q", node) - } - for _, pod := range pods.Items { - for _, label := range appLabels { - if pod.Labels["app"] == label { - podsOnNode = append(podsOnNode, pod) - break - } - } - - } - return podsOnNode, nil -} diff --git a/pkg/operator/ceph/cluster/cephstatus_test.go b/pkg/operator/ceph/cluster/cephstatus_test.go deleted file mode 100644 index 3bec57ee7..000000000 --- a/pkg/operator/ceph/cluster/cephstatus_test.go +++ /dev/null @@ -1,375 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage Kubernetes storage. 
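
The helpers removed just above (forceDeleteStuckRookPodsOnNotReadyNodes and getRookPodsOnNode) list Rook pods on a failed node and force-delete any that are stuck terminating. A minimal sketch of what a forced deletion looks like with client-go, using a zero grace period; this is a sketch, not the removed helper's exact implementation:

package podutil

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// forceDeletePod asks the API server to remove the pod immediately by setting
// a zero grace period, which is how pods stuck in Terminating on a NotReady
// node are typically cleared.
func forceDeletePod(ctx context.Context, clientset kubernetes.Interface, namespace, name string) error {
    grace := int64(0)
    return clientset.CoreV1().Pods(namespace).Delete(ctx, name, metav1.DeleteOptions{
        GracePeriodSeconds: &grace,
    })
}
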
-package cluster - -import ( - "context" - "fmt" - "reflect" - "sort" - "testing" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - optest "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestCephStatus(t *testing.T) { - newStatus := &cephclient.CephStatus{ - Health: cephclient.HealthStatus{Status: "HEALTH_OK"}, - } - - // Empty initial status will have no previous health - currentStatus := cephv1.ClusterStatus{} - aggregateStatus := toCustomResourceStatus(currentStatus, newStatus) - assert.NotNil(t, aggregateStatus) - assert.Equal(t, "HEALTH_OK", aggregateStatus.Health) - assert.NotEqual(t, "", aggregateStatus.LastChecked) - assert.Equal(t, "", aggregateStatus.LastChanged) - assert.Equal(t, "", aggregateStatus.PreviousHealth) - assert.Equal(t, 0, len(aggregateStatus.Details)) - - // Set the current status to the same as the new status and there will be no previous health - currentStatus.CephStatus = &cephv1.CephStatus{ - Health: "HEALTH_OK", - } - aggregateStatus = toCustomResourceStatus(currentStatus, newStatus) - assert.NotNil(t, aggregateStatus) - assert.Equal(t, "HEALTH_OK", aggregateStatus.Health) - assert.NotEqual(t, "", aggregateStatus.LastChecked) - assert.Equal(t, "", aggregateStatus.LastChanged) - assert.Equal(t, "", aggregateStatus.PreviousHealth) - assert.Equal(t, 0, len(aggregateStatus.Details)) - - // Set the new status to something different and we should get a previous health - // Simulate the previous check a minute ago. 
- previousTime := formatTime(time.Now().Add(-time.Minute).UTC()) - currentStatus.CephStatus.LastChecked = previousTime - newStatus.Health.Status = "HEALTH_WARN" - aggregateStatus = toCustomResourceStatus(currentStatus, newStatus) - assert.NotNil(t, aggregateStatus) - assert.Equal(t, "HEALTH_WARN", aggregateStatus.Health) - assert.NotEqual(t, "", aggregateStatus.LastChecked) - assert.Equal(t, aggregateStatus.LastChecked, aggregateStatus.LastChanged) - assert.Equal(t, "HEALTH_OK", aggregateStatus.PreviousHealth) - assert.Equal(t, 0, len(aggregateStatus.Details)) - - // Add some details to the warning - osdDownMsg := cephclient.CheckMessage{Severity: "HEALTH_WARN"} - osdDownMsg.Summary.Message = "1 osd down" - pgAvailMsg := cephclient.CheckMessage{Severity: "HEALTH_ERR"} - pgAvailMsg.Summary.Message = "'Reduced data availability: 100 pgs stale'" - newStatus.Health.Checks = map[string]cephclient.CheckMessage{ - "OSD_DOWN": osdDownMsg, - "PG_AVAILABILITY": pgAvailMsg, - } - newStatus.Health.Status = "HEALTH_ERR" - aggregateStatus = toCustomResourceStatus(currentStatus, newStatus) - assert.NotNil(t, aggregateStatus) - assert.Equal(t, "HEALTH_ERR", aggregateStatus.Health) - assert.NotEqual(t, "", aggregateStatus.LastChecked) - assert.Equal(t, aggregateStatus.LastChecked, aggregateStatus.LastChanged) - assert.Equal(t, "HEALTH_OK", aggregateStatus.PreviousHealth) - assert.Equal(t, 2, len(aggregateStatus.Details)) - assert.Equal(t, osdDownMsg.Summary.Message, aggregateStatus.Details["OSD_DOWN"].Message) - assert.Equal(t, osdDownMsg.Severity, aggregateStatus.Details["OSD_DOWN"].Severity) - assert.Equal(t, pgAvailMsg.Summary.Message, aggregateStatus.Details["PG_AVAILABILITY"].Message) - assert.Equal(t, pgAvailMsg.Severity, aggregateStatus.Details["PG_AVAILABILITY"].Severity) - - // Test for storage capacity of the ceph cluster when there is no disk - newStatus = &cephclient.CephStatus{ - PgMap: cephclient.PgMap{TotalBytes: 0}, - } - aggregateStatus = toCustomResourceStatus(currentStatus, newStatus) - assert.Equal(t, 0, int(aggregateStatus.Capacity.TotalBytes)) - assert.Equal(t, "", aggregateStatus.Capacity.LastUpdated) - - // Test for storage capacity of the ceph cluster when the disk of size 1024 bytes attached - newStatus = &cephclient.CephStatus{ - PgMap: cephclient.PgMap{TotalBytes: 1024}, - } - aggregateStatus = toCustomResourceStatus(currentStatus, newStatus) - assert.Equal(t, 1024, int(aggregateStatus.Capacity.TotalBytes)) - assert.Equal(t, formatTime(time.Now().UTC()), aggregateStatus.Capacity.LastUpdated) - - // Test for storage capacity of the ceph cluster when initially there is a disk of size - // 1024 bytes attached and then the disk is removed or newStatus.PgMap.TotalBytes is 0. 
- currentStatus.CephStatus.Capacity.TotalBytes = 1024 - newStatus = &cephclient.CephStatus{ - PgMap: cephclient.PgMap{TotalBytes: 0}, - } - - aggregateStatus = toCustomResourceStatus(currentStatus, newStatus) - assert.Equal(t, 1024, int(aggregateStatus.Capacity.TotalBytes)) - assert.Equal(t, formatTime(time.Now().Add(-time.Minute).UTC()), formatTime(time.Now().Add(-time.Minute).UTC())) -} - -func TestNewCephStatusChecker(t *testing.T) { - clusterInfo := cephclient.AdminClusterInfo("ns") - c := &clusterd.Context{} - time10s, err := time.ParseDuration("10s") - assert.NoError(t, err) - - type args struct { - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - clusterSpec *cephv1.ClusterSpec - } - tests := []struct { - name string - args args - want *cephStatusChecker - }{ - {"default-interval", args{c, clusterInfo, &cephv1.ClusterSpec{}}, &cephStatusChecker{c, clusterInfo, &defaultStatusCheckInterval, c.Client, false}}, - {"10s-interval", args{c, clusterInfo, &cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{Status: cephv1.HealthCheckSpec{Interval: &metav1.Duration{Duration: time10s}}}}}}, &cephStatusChecker{c, clusterInfo, &time10s, c.Client, false}}, - {"10s-interval-external", args{c, clusterInfo, &cephv1.ClusterSpec{External: cephv1.ExternalSpec{Enable: true}, HealthCheck: cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{Status: cephv1.HealthCheckSpec{Interval: &metav1.Duration{Duration: time10s}}}}}}, &cephStatusChecker{c, clusterInfo, &time10s, c.Client, true}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := newCephStatusChecker(tt.args.context, tt.args.clusterInfo, tt.args.clusterSpec); !reflect.DeepEqual(got, tt.want) { - t.Errorf("newCephStatusChecker() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestConfigureHealthSettings(t *testing.T) { - c := &cephStatusChecker{ - context: &clusterd.Context{}, - clusterInfo: cephclient.AdminClusterInfo("ns"), - } - setGlobalIDReclaim := false - c.context.Executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "config" && args[3] == "auth_allow_insecure_global_id_reclaim" { - if args[1] == "set" { - setGlobalIDReclaim = true - return "", nil - } - } - return "", errors.New("mock error to simulate failure of mon store config") - }, - } - noActionOneWarningStatus := cephclient.CephStatus{ - Health: cephclient.HealthStatus{ - Checks: map[string]cephclient.CheckMessage{ - "MDS_ALL_DOWN": { - Severity: "HEALTH_WARN", - Summary: cephclient.Summary{ - Message: "MDS_ALL_DOWN", - }, - }, - }, - }, - } - disableInsecureGlobalIDStatus := cephclient.CephStatus{ - Health: cephclient.HealthStatus{ - Checks: map[string]cephclient.CheckMessage{ - "AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED": { - Severity: "HEALTH_WARN", - Summary: cephclient.Summary{ - Message: "foo", - }, - }, - }, - }, - } - noDisableInsecureGlobalIDStatus := cephclient.CephStatus{ - Health: cephclient.HealthStatus{ - Checks: map[string]cephclient.CheckMessage{ - "AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED": { - Severity: "HEALTH_WARN", - Summary: cephclient.Summary{ - Message: "foo", - }, - }, - "AUTH_INSECURE_GLOBAL_ID_RECLAIM": { - Severity: "HEALTH_WARN", - Summary: cephclient.Summary{ - Message: "bar", - }, - }, - }, - }, - } - - type args struct { - status cephclient.CephStatus - expectedSetGlobalIDSetting bool - } - tests := 
[]struct { - name string - args args - }{ - {"no-warnings", args{cephclient.CephStatus{}, false}}, - {"no-action-one-warning", args{noActionOneWarningStatus, false}}, - {"disable-insecure-global-id", args{disableInsecureGlobalIDStatus, true}}, - {"no-disable-insecure-global-id", args{noDisableInsecureGlobalIDStatus, false}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - setGlobalIDReclaim = false - c.configureHealthSettings(tt.args.status) - assert.Equal(t, tt.args.expectedSetGlobalIDSetting, setGlobalIDReclaim) - }) - } -} - -func TestForceDeleteStuckRookPodsOnNotReadyNodes(t *testing.T) { - ctx := context.TODO() - clientset := optest.New(t, 1) - clusterInfo := cephclient.NewClusterInfo("test", "test") - clusterName := clusterInfo.NamespacedName() - - context := &clusterd.Context{ - Clientset: clientset, - } - - c := newCephStatusChecker(context, clusterInfo, &cephv1.ClusterSpec{}) - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "stuck-pod", - Namespace: clusterName.Namespace, - Labels: map[string]string{ - "app": "rook-ceph-osd", - }, - }, - } - pod.Spec.NodeName = "node0" - _, err := context.Clientset.CoreV1().Pods(clusterName.Namespace).Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Create a non-matching pod - notDeletePod := pod - notDeletePod.ObjectMeta.Labels = map[string]string{"app": "not-to-be-deleted"} - notDeletePod.ObjectMeta.Name = "not-to-be-deleted" - notDeletePod.DeletionTimestamp = &metav1.Time{Time: time.Now()} - _, err = context.Clientset.CoreV1().Pods(clusterName.Namespace).Create(ctx, &notDeletePod, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Set the node to NotReady state - nodes, err := context.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - for _, node := range nodes.Items { - node.Status.Conditions[0].Status = v1.ConditionFalse - localnode := node - _, err := context.Clientset.CoreV1().Nodes().Update(ctx, &localnode, metav1.UpdateOptions{}) - assert.NoError(t, err) - } - - // There should be no error - err = c.forceDeleteStuckRookPodsOnNotReadyNodes() - assert.NoError(t, err) - - // The pod should still exist since it's not deleted.
- p, err := context.Clientset.CoreV1().Pods(clusterInfo.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, p) - - // Add a deletion timestamp to the pod - pod.DeletionTimestamp = &metav1.Time{Time: time.Now()} - _, err = clientset.CoreV1().Pods(clusterName.Namespace).Update(ctx, &pod, metav1.UpdateOptions{}) - assert.NoError(t, err) - - // There should be no error as the pod is deleted - err = c.forceDeleteStuckRookPodsOnNotReadyNodes() - assert.NoError(t, err) - - // The pod should be deleted since the pod is marked as deleted and the node is in NotReady state - _, err = clientset.CoreV1().Pods(clusterName.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) - assert.Error(t, err) - assert.True(t, kerrors.IsNotFound(err)) - - // The pod should not be deleted as it does not have the matching labels - _, err = clientset.CoreV1().Pods(clusterName.Namespace).Get(ctx, notDeletePod.Name, metav1.GetOptions{}) - assert.NoError(t, err) -} - -func TestGetRookPodsOnNode(t *testing.T) { - ctx := context.TODO() - clientset := optest.New(t, 1) - clusterInfo := cephclient.NewClusterInfo("test", "test") - clusterName := clusterInfo.NamespacedName() - context := &clusterd.Context{ - Clientset: clientset, - } - - c := newCephStatusChecker(context, clusterInfo, &cephv1.ClusterSpec{}) - labels := []map[string]string{ - {"app": "rook-ceph-osd"}, - {"app": "csi-rbdplugin-provisioner"}, - {"app": "csi-rbdplugin"}, - {"app": "csi-cephfsplugin-provisioner"}, - {"app": "csi-cephfsplugin"}, - {"app": "rook-ceph-operator"}, - {"app": "rook-ceph-crashcollector"}, - {"app": "rook-ceph-mgr"}, - {"app": "rook-ceph-mds"}, - {"app": "rook-ceph-rgw"}, - {"app": "user-app"}, - {"app": "rook-ceph-mon"}, - } - - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-with-no-label", - Namespace: clusterName.Namespace, - }, - } - pod.Spec.NodeName = "node0" - _, err := context.Clientset.CoreV1().Pods(clusterName.Namespace).Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - - expectedPodNames := []string{} - for i, label := range labels { - pod.ObjectMeta.Name = fmt.Sprintf("pod-%d", i) - pod.ObjectMeta.Namespace = clusterName.Namespace - pod.ObjectMeta.Labels = label - if label["app"] != "user-app" { - expectedPodNames = append(expectedPodNames, pod.Name) - } - _, err := context.Clientset.CoreV1().Pods(clusterName.Namespace).Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - } - - pods, err := c.getRookPodsOnNode("node0") - assert.NoError(t, err) - // A pod that has two matching labels is returned only once - assert.Equal(t, 11, len(pods)) - - podNames := []string{} - for _, pod := range pods { - // Check that each pod has labels - assert.NotEmpty(t, pod.Labels) - podNames = append(podNames, pod.Name) - } - - sort.Strings(expectedPodNames) - sort.Strings(podNames) - assert.Equal(t, expectedPodNames, podNames) -} diff --git a/pkg/operator/ceph/cluster/cleanup.go b/pkg/operator/ceph/cluster/cleanup.go deleted file mode 100644 index c0ed852e2..000000000 --- a/pkg/operator/ceph/cluster/cleanup.go +++ /dev/null @@ -1,253 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "context" - "fmt" - "strconv" - "strings" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - "github.com/rook/rook/pkg/operator/ceph/cluster/rbd" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/file/mds" - "github.com/rook/rook/pkg/operator/ceph/file/mirror" - "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/pkg/operator/k8sutil" - batch "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" -) - -const ( - clusterCleanUpPolicyRetryInterval = 5 //seconds - // CleanupAppName is the cluster clean up job name - CleanupAppName = "rook-ceph-cleanup" -) - -var ( - volumeName = "cleanup-volume" - dataDirHostPath = "ROOK_DATA_DIR_HOST_PATH" - namespaceDir = "ROOK_NAMESPACE_DIR" - monitorSecret = "ROOK_MON_SECRET" - clusterFSID = "ROOK_CLUSTER_FSID" - sanitizeMethod = "ROOK_SANITIZE_METHOD" - sanitizeDataSource = "ROOK_SANITIZE_DATA_SOURCE" - sanitizeIteration = "ROOK_SANITIZE_ITERATION" - sanitizeIterationDefault int32 = 1 -) - -func (c *ClusterController) startClusterCleanUp(stopCleanupCh chan struct{}, cluster *cephv1.CephCluster, cephHosts []string, monSecret, clusterFSID string) { - logger.Infof("starting clean up for cluster %q", cluster.Name) - err := c.waitForCephDaemonCleanUp(stopCleanupCh, cluster, time.Duration(clusterCleanUpPolicyRetryInterval)*time.Second) - if err != nil { - logger.Errorf("failed to wait till ceph daemons are destroyed. %v", err) - return - } - - c.startCleanUpJobs(cluster, cephHosts, monSecret, clusterFSID) -} - -func (c *ClusterController) startCleanUpJobs(cluster *cephv1.CephCluster, cephHosts []string, monSecret, clusterFSID string) { - for _, hostName := range cephHosts { - logger.Infof("starting clean up job on node %q", hostName) - jobName := k8sutil.TruncateNodeName("cluster-cleanup-job-%s", hostName) - podSpec := c.cleanUpJobTemplateSpec(cluster, monSecret, clusterFSID) - podSpec.Spec.NodeSelector = map[string]string{v1.LabelHostname: hostName} - labels := controller.AppLabels(CleanupAppName, cluster.Namespace) - labels[CleanupAppName] = "true" - job := &batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: cluster.Namespace, - Labels: labels, - }, - Spec: batch.JobSpec{ - Template: podSpec, - }, - } - - // Apply annotations - cephv1.GetCleanupAnnotations(cluster.Spec.Annotations).ApplyToObjectMeta(&job.ObjectMeta) - cephv1.GetCleanupLabels(cluster.Spec.Labels).ApplyToObjectMeta(&job.ObjectMeta) - - if err := k8sutil.RunReplaceableJob(c.context.Clientset, job, true); err != nil { - logger.Errorf("failed to run cluster clean up job on node %q. 
%v", hostName, err) - } - } -} - -func (c *ClusterController) cleanUpJobContainer(cluster *cephv1.CephCluster, monSecret, cephFSID string) v1.Container { - volumeMounts := []v1.VolumeMount{} - envVars := []v1.EnvVar{} - if cluster.Spec.DataDirHostPath != "" { - if cluster.Spec.CleanupPolicy.SanitizeDisks.Iteration == 0 { - cluster.Spec.CleanupPolicy.SanitizeDisks.Iteration = sanitizeIterationDefault - } - - hostPathVolumeMount := v1.VolumeMount{Name: volumeName, MountPath: cluster.Spec.DataDirHostPath} - devMount := v1.VolumeMount{Name: "devices", MountPath: "/dev"} - volumeMounts = append(volumeMounts, hostPathVolumeMount) - volumeMounts = append(volumeMounts, devMount) - envVars = append(envVars, []v1.EnvVar{ - {Name: dataDirHostPath, Value: cluster.Spec.DataDirHostPath}, - {Name: namespaceDir, Value: cluster.Namespace}, - {Name: monitorSecret, Value: monSecret}, - {Name: clusterFSID, Value: cephFSID}, - {Name: "ROOK_LOG_LEVEL", Value: "DEBUG"}, - mon.PodNamespaceEnvVar(cluster.Namespace), - {Name: sanitizeMethod, Value: cluster.Spec.CleanupPolicy.SanitizeDisks.Method.String()}, - {Name: sanitizeDataSource, Value: cluster.Spec.CleanupPolicy.SanitizeDisks.DataSource.String()}, - {Name: sanitizeIteration, Value: strconv.Itoa(int(cluster.Spec.CleanupPolicy.SanitizeDisks.Iteration))}, - }...) - } - - return v1.Container{ - Name: "host-cleanup", - Image: c.rookImage, - SecurityContext: osd.PrivilegedContext(), - VolumeMounts: volumeMounts, - Env: envVars, - Args: []string{"ceph", "clean"}, - Resources: cephv1.GetCleanupResources(cluster.Spec.Resources), - } -} - -func (c *ClusterController) cleanUpJobTemplateSpec(cluster *cephv1.CephCluster, monSecret, clusterFSID string) v1.PodTemplateSpec { - volumes := []v1.Volume{} - hostPathVolume := v1.Volume{Name: volumeName, VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: cluster.Spec.DataDirHostPath}}} - devVolume := v1.Volume{Name: "devices", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev"}}} - volumes = append(volumes, hostPathVolume) - volumes = append(volumes, devVolume) - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: CleanupAppName, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.cleanUpJobContainer(cluster, monSecret, clusterFSID), - }, - Volumes: volumes, - RestartPolicy: v1.RestartPolicyOnFailure, - PriorityClassName: cephv1.GetCleanupPriorityClassName(cluster.Spec.PriorityClassNames), - }, - } - - cephv1.GetCleanupAnnotations(cluster.Spec.Annotations).ApplyToObjectMeta(&podSpec.ObjectMeta) - cephv1.GetCleanupLabels(cluster.Spec.Labels).ApplyToObjectMeta(&podSpec.ObjectMeta) - - // Apply placement - getCleanupPlacement(cluster.Spec).ApplyToPodSpec(&podSpec.Spec) - - return podSpec -} - -// getCleanupPlacement returns the placement for the cleanup job -func getCleanupPlacement(c cephv1.ClusterSpec) cephv1.Placement { - // The cleanup jobs are assigned by the operator to a specific node, so the - // node affinity and other affinity are not needed for scheduling. - // The only placement required for the cleanup daemons is the tolerations. - tolerations := c.Placement[cephv1.KeyAll].Tolerations - tolerations = append(tolerations, c.Placement[cephv1.KeyCleanup].Tolerations...) - tolerations = append(tolerations, c.Placement[cephv1.KeyMonArbiter].Tolerations...) - tolerations = append(tolerations, c.Placement[cephv1.KeyMon].Tolerations...) - tolerations = append(tolerations, c.Placement[cephv1.KeyMgr].Tolerations...) 
- tolerations = append(tolerations, c.Placement[cephv1.KeyOSD].Tolerations...) - - // Add the tolerations for all the device sets - for _, deviceSet := range c.Storage.StorageClassDeviceSets { - tolerations = append(tolerations, deviceSet.Placement.Tolerations...) - } - return cephv1.Placement{Tolerations: tolerations} -} - -func (c *ClusterController) waitForCephDaemonCleanUp(stopCleanupCh chan struct{}, cluster *cephv1.CephCluster, retryInterval time.Duration) error { - logger.Infof("waiting for all the ceph daemons to be cleaned up in the cluster %q", cluster.Namespace) - for { - select { - case <-time.After(retryInterval): - cephHosts, err := c.getCephHosts(cluster.Namespace) - if err != nil { - return errors.Wrap(err, "failed to list ceph daemon nodes") - } - - if len(cephHosts) == 0 { - logger.Info("all ceph daemons are cleaned up") - return nil - } - - logger.Debugf("waiting for ceph daemons in cluster %q to be cleaned up. Retrying in %q", - cluster.Namespace, retryInterval.String()) - case <-stopCleanupCh: - return errors.New("cancelling the host cleanup job") - } - } -} - -// getCephHosts returns a list of host names where ceph daemon pods are running -func (c *ClusterController) getCephHosts(namespace string) ([]string, error) { - ctx := context.TODO() - cephAppNames := []string{mon.AppName, mgr.AppName, osd.AppName, object.AppName, mds.AppName, rbd.AppName, mirror.AppName} - nodeNameList := sets.NewString() - hostNameList := []string{} - var b strings.Builder - - // get all the node names where ceph daemons are running - for _, app := range cephAppNames { - appLabelSelector := fmt.Sprintf("app=%s", app) - podList, err := c.context.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: appLabelSelector}) - if err != nil { - return hostNameList, errors.Wrapf(err, "could not list the %q pods", app) - } - for _, cephPod := range podList.Items { - podNodeName := cephPod.Spec.NodeName - if podNodeName != "" && !nodeNameList.Has(podNodeName) { - nodeNameList.Insert(podNodeName) - } - } - fmt.Fprintf(&b, "%s: %d. ", app, len(podList.Items)) - } - - logger.Infof("existing ceph daemons in the namespace %q. %s", namespace, b.String()) - - for nodeName := range nodeNameList { - podHostName, err := k8sutil.GetNodeHostName(c.context.Clientset, nodeName) - if err != nil { - return nil, errors.Wrapf(err, "failed to get hostname from node %q", nodeName) - } - hostNameList = append(hostNameList, podHostName) - } - - return hostNameList, nil -} - -func (c *ClusterController) getCleanUpDetails(namespace string) (string, string, error) { - clusterInfo, _, _, err := mon.LoadClusterInfo(c.context, namespace) - if err != nil { - return "", "", errors.Wrap(err, "failed to get cluster info") - } - - return clusterInfo.MonitorSecret, clusterInfo.FSID, nil -} diff --git a/pkg/operator/ceph/cluster/cleanup_test.go b/pkg/operator/ceph/cluster/cleanup_test.go deleted file mode 100644 index 4f3876ae0..000000000 --- a/pkg/operator/ceph/cluster/cleanup_test.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookfake "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - testop "github.com/rook/rook/pkg/operator/test" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestCleanupJobSpec(t *testing.T) { - expectedHostPath := "var/lib/rook" - expectedNamespace := "test-rook-ceph" - cluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: expectedNamespace, - }, - Spec: cephv1.ClusterSpec{ - DataDirHostPath: expectedHostPath, - CleanupPolicy: cephv1.CleanupPolicySpec{ - Confirmation: "yes-really-destroy-data", - }, - }, - } - clientset := testop.New(t, 3) - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookfake.NewSimpleClientset(), - } - operatorConfigCallbacks := []func() error{ - func() error { - logger.Infof("test success callback") - return nil - }, - } - addCallbacks := []func() error{ - func() error { - logger.Infof("test success callback") - return nil - }, - } - controller := NewClusterController(context, "", &attachment.MockAttachment{}, operatorConfigCallbacks, addCallbacks) - podTemplateSpec := controller.cleanUpJobTemplateSpec(cluster, "monSecret", "28b87851-8dc1-46c8-b1ec-90ec51a47c89") - assert.Equal(t, expectedHostPath, podTemplateSpec.Spec.Containers[0].Env[0].Value) - assert.Equal(t, expectedNamespace, podTemplateSpec.Spec.Containers[0].Env[1].Value) -} - -func TestCleanupPlacement(t *testing.T) { - // no tolerations end up in an empty list of tolerations - c := cephv1.ClusterSpec{} - p := getCleanupPlacement(c) - assert.Equal(t, cephv1.Placement{}, p) - - // add tolerations for each of the daemons - c.Placement = cephv1.PlacementSpec{} - c.Placement[cephv1.KeyAll] = cephv1.Placement{Tolerations: []v1.Toleration{{Key: "allToleration"}}} - p = getCleanupPlacement(c) - assert.Equal(t, c.Placement[cephv1.KeyAll], p) - - c.Placement[cephv1.KeyMon] = cephv1.Placement{Tolerations: []v1.Toleration{{Key: "monToleration"}}} - p = getCleanupPlacement(c) - assert.Equal(t, 2, len(p.Tolerations)) - - c.Placement[cephv1.KeyMgr] = cephv1.Placement{Tolerations: []v1.Toleration{{Key: "mgrToleration"}}} - p = getCleanupPlacement(c) - assert.Equal(t, 3, len(p.Tolerations)) - - c.Placement[cephv1.KeyMonArbiter] = cephv1.Placement{Tolerations: []v1.Toleration{{Key: "monArbiterToleration"}}} - p = getCleanupPlacement(c) - assert.Equal(t, 4, len(p.Tolerations)) - - c.Placement[cephv1.KeyOSD] = cephv1.Placement{Tolerations: []v1.Toleration{{Key: "osdToleration"}}} - p = getCleanupPlacement(c) - assert.Equal(t, 5, len(p.Tolerations)) - - c.Storage.StorageClassDeviceSets = []cephv1.StorageClassDeviceSet{ - {Placement: cephv1.Placement{Tolerations: []v1.Toleration{{Key: "deviceSetToleration"}}}}, - } - p = getCleanupPlacement(c) - assert.Equal(t, 6, len(p.Tolerations)) -} diff --git a/pkg/operator/ceph/cluster/cluster.go b/pkg/operator/ceph/cluster/cluster.go deleted file mode 100755 index c6a72069f..000000000 --- a/pkg/operator/ceph/cluster/cluster.go +++ /dev/null @@ -1,583 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. -package cluster - -import ( - "context" - "fmt" - "os/exec" - "path" - "strings" - "sync" - "syscall" - - "github.com/pkg/errors" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - "github.com/rook/rook/pkg/operator/ceph/cluster/crash" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/csi" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -const ( - detectVersionName = "rook-ceph-detect-version" -) - -type cluster struct { - ClusterInfo *client.ClusterInfo - context *clusterd.Context - Namespace string - Spec *cephv1.ClusterSpec - namespacedName types.NamespacedName - mons *mon.Cluster - stopCh chan struct{} - closedStopCh bool - ownerInfo *k8sutil.OwnerInfo - isUpgrade bool - watchersActivated bool - monitoringChannels map[string]*clusterHealth -} - -type clusterHealth struct { - stopChan chan struct{} - monitoringRunning bool -} - -func newCluster(c *cephv1.CephCluster, context *clusterd.Context, csiMutex *sync.Mutex, ownerInfo *k8sutil.OwnerInfo) *cluster { - return &cluster{ - // at this phase of the cluster creation process, the identity components of the cluster are - // not yet established. we reserve this struct which is filled in as soon as the cluster's - // identity can be established. 
- ClusterInfo: client.AdminClusterInfo(c.Namespace), - Namespace: c.Namespace, - Spec: &c.Spec, - context: context, - namespacedName: types.NamespacedName{Namespace: c.Namespace, Name: c.Name}, - monitoringChannels: make(map[string]*clusterHealth), - stopCh: make(chan struct{}), - ownerInfo: ownerInfo, - mons: mon.New(context, c.Namespace, c.Spec, ownerInfo, csiMutex), - } -} - -func (c *cluster) reconcileCephDaemons(rookImage string, cephVersion cephver.CephVersion) error { - // Create a configmap for overriding ceph config settings - // These settings should only be modified by a user after they are initialized - err := populateConfigOverrideConfigMap(c.context, c.Namespace, c.ownerInfo) - if err != nil { - return errors.Wrap(err, "failed to populate config override config map") - } - - // Start the mon pods - controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph Mons") - clusterInfo, err := c.mons.Start(c.ClusterInfo, rookImage, cephVersion, *c.Spec) - if err != nil { - return errors.Wrap(err, "failed to start ceph monitors") - } - clusterInfo.OwnerInfo = c.ownerInfo - clusterInfo.SetName(c.namespacedName.Name) - c.ClusterInfo = clusterInfo - c.ClusterInfo.NetworkSpec = c.Spec.Network - - // The cluster Identity must be established at this point - if !c.ClusterInfo.IsInitialized(true) { - return errors.New("the cluster identity was not established") - } - - // Check whether we need to cancel the orchestration - if err := controller.CheckForCancelledOrchestration(c.context); err != nil { - return err - } - - // Execute actions after the monitors are up and running - logger.Debug("monitors are up and running, executing post actions") - err = c.postMonStartupActions() - if err != nil { - return errors.Wrap(err, "failed to execute post actions after all the ceph monitors started") - } - - // If this is an upgrade, notify all the child controllers - if c.isUpgrade { - logger.Info("upgrade in progress, notifying child CRs") - err := c.notifyChildControllerOfUpgrade() - if err != nil { - return errors.Wrap(err, "failed to notify child CRs of upgrade") - } - } - - // Start Ceph manager - controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph Mgr(s)") - mgrs := mgr.New(c.context, c.ClusterInfo, *c.Spec, rookImage) - err = mgrs.Start() - if err != nil { - return errors.Wrap(err, "failed to start ceph mgr") - } - - // Start the OSDs - controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph OSDs") - osds := osd.New(c.context, c.ClusterInfo, *c.Spec, rookImage) - err = osds.Start() - if err != nil { - return errors.Wrap(err, "failed to start ceph osds") - } - - // If a stretch cluster, enable the arbiter after the OSDs are created with the CRUSH map - if c.Spec.IsStretchCluster() { - if err := c.mons.ConfigureArbiter(); err != nil { - return errors.Wrap(err, "failed to configure stretch arbiter") - } - } - - logger.Infof("done reconciling ceph cluster in namespace %q", c.Namespace) - - // We should be done updating by now - if c.isUpgrade { - c.printOverallCephVersion() - - // reset the isUpgrade flag - c.isUpgrade = false - } - - return nil -} - -func (c *ClusterController) initializeCluster(cluster *cluster) error { - // Check if the dataDirHostPath is located in the disallowed paths list - 
cleanDataDirHostPath := path.Clean(cluster.Spec.DataDirHostPath) - for _, b := range disallowedHostDirectories { - if cleanDataDirHostPath == b { - logger.Errorf("dataDirHostPath (given: %q) must not be used, conflicts with %q internal path", cluster.Spec.DataDirHostPath, b) - return nil - } - } - - clusterInfo, _, _, err := mon.LoadClusterInfo(c.context, cluster.Namespace) - if err != nil { - logger.Infof("clusterInfo not yet found, must be a new cluster") - } else { - clusterInfo.OwnerInfo = cluster.ownerInfo - clusterInfo.SetName(c.namespacedName.Name) - cluster.ClusterInfo = clusterInfo - } - - // Depending on the cluster type, choose the correct orchestration - if cluster.Spec.External.Enable { - err := c.configureExternalCephCluster(cluster) - if err != nil { - controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionFalse, cephv1.ClusterProgressingReason, err.Error()) - return errors.Wrap(err, "failed to configure external ceph cluster") - } - } else { - // If the local cluster has already been configured, immediately start monitoring the cluster. - // Test whether the cluster has already been configured by checking if the mgr deployment has been created. - // If the mgr does not exist, the mons have never been verified to be in quorum. - opts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", k8sutil.AppAttr, mgr.AppName)} - mgrDeployments, err := c.context.Clientset.AppsV1().Deployments(cluster.Namespace).List(context.TODO(), opts) - if err == nil && len(mgrDeployments.Items) > 0 && cluster.ClusterInfo != nil { - c.configureCephMonitoring(cluster, clusterInfo) - } - - err = c.configureLocalCephCluster(cluster) - if err != nil { - controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionFalse, cephv1.ClusterProgressingReason, err.Error()) - return errors.Wrap(err, "failed to configure local ceph cluster") - } - } - - // Populate ClusterInfo with the last value - cluster.mons.ClusterInfo = cluster.ClusterInfo - cluster.mons.ClusterInfo.SetName(c.namespacedName.Name) - - // Start the monitoring if not already started - c.configureCephMonitoring(cluster, cluster.ClusterInfo) - return nil -} - -func (c *ClusterController) configureLocalCephCluster(cluster *cluster) error { - // Cluster Spec validation - err := preClusterStartValidation(cluster) - if err != nil { - return errors.Wrap(err, "failed to perform validation before cluster creation") - } - - // Run image validation job - controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Detecting Ceph version") - cephVersion, isUpgrade, err := c.detectAndValidateCephVersion(cluster) - if err != nil { - return errors.Wrap(err, "failed the ceph version check") - } - // Set the value of isUpgrade based on the image discovery done by detectAndValidateCephVersion() - cluster.isUpgrade = isUpgrade - - if cluster.Spec.IsStretchCluster() { - if !cephVersion.IsAtLeast(cephver.CephVersion{Major: 16, Minor: 2, Build: 5}) { - return errors.Errorf("stretch clusters minimum ceph version is v16.2.5, but is running %s", cephVersion.String()) - } - } - - controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring the Ceph cluster") - - // Run the orchestration - err = cluster.reconcileCephDaemons(c.rookImage, *cephVersion) - if err != nil { - return errors.Wrap(err, "failed to create cluster") - } - - // Set the 
condition to the cluster object - controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionReady, v1.ConditionTrue, cephv1.ClusterCreatedReason, "Cluster created successfully") - return nil -} - -func (c *cluster) notifyChildControllerOfUpgrade() error { - ctx := context.TODO() - version := strings.Replace(c.ClusterInfo.CephVersion.String(), " ", "-", -1) - - // List all child controllers - cephFilesystems, err := c.context.RookClientset.CephV1().CephFilesystems(c.Namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "failed to list ceph filesystem CRs") - } - for _, cephFilesystem := range cephFilesystems.Items { - if cephFilesystem.Labels == nil { - cephFilesystem.Labels = map[string]string{} - } - cephFilesystem.Labels["ceph_version"] = version - localCephFilesystem := cephFilesystem - _, err := c.context.RookClientset.CephV1().CephFilesystems(c.Namespace).Update(ctx, &localCephFilesystem, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to update ceph filesystem CR %q with new label", cephFilesystem.Name) - } - } - - cephObjectStores, err := c.context.RookClientset.CephV1().CephObjectStores(c.Namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "failed to list ceph object store CRs") - } - for _, cephObjectStore := range cephObjectStores.Items { - if cephObjectStore.Labels == nil { - cephObjectStore.Labels = map[string]string{} - } - cephObjectStore.Labels["ceph_version"] = version - localCephObjectStore := cephObjectStore - _, err := c.context.RookClientset.CephV1().CephObjectStores(c.Namespace).Update(ctx, &localCephObjectStore, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to update ceph object store CR %q with new label", cephObjectStore.Name) - } - } - - cephNFSes, err := c.context.RookClientset.CephV1().CephNFSes(c.Namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "failed to list ceph nfs CRs") - } - for _, cephNFS := range cephNFSes.Items { - if cephNFS.Labels == nil { - cephNFS.Labels = map[string]string{} - } - cephNFS.Labels["ceph_version"] = version - localCephNFS := cephNFS - _, err := c.context.RookClientset.CephV1().CephNFSes(c.Namespace).Update(ctx, &localCephNFS, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to update ceph nfs CR %q with new label", cephNFS.Name) - } - } - - cephRBDMirrors, err := c.context.RookClientset.CephV1().CephRBDMirrors(c.Namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "failed to list ceph rbd-mirror CRs") - } - for _, cephRBDMirror := range cephRBDMirrors.Items { - if cephRBDMirror.Labels == nil { - cephRBDMirror.Labels = map[string]string{} - } - cephRBDMirror.Labels["ceph_version"] = version - localCephRBDMirror := cephRBDMirror - _, err := c.context.RookClientset.CephV1().CephRBDMirrors(c.Namespace).Update(ctx, &localCephRBDMirror, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to update ceph rbd-mirror CR %q with new label", cephRBDMirror.Name) - } - } - - cephFilesystemMirrors, err := c.context.RookClientset.CephV1().CephFilesystemMirrors(c.Namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "failed to list cephfs mirror CRs") - } - for _, cephFilesystemMirror := range cephFilesystemMirrors.Items { - if cephFilesystemMirror.Labels == nil { - cephFilesystemMirror.Labels = map[string]string{} - } - 
cephFilesystemMirror.Labels["ceph_version"] = version - localCephFilesystemMirror := cephFilesystemMirror - _, err := c.context.RookClientset.CephV1().CephFilesystemMirrors(c.Namespace).Update(ctx, &localCephFilesystemMirror, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to update ceph nfs CR %q with new label", cephFilesystemMirror.Name) - } - } - - return nil -} - -// Validate the cluster Specs -func preClusterStartValidation(cluster *cluster) error { - ctx := context.TODO() - if cluster.Spec.Mon.Count == 0 { - logger.Warningf("mon count should be at least 1, will use default value of %d", mon.DefaultMonCount) - cluster.Spec.Mon.Count = mon.DefaultMonCount - } - if cluster.Spec.Mon.Count%2 == 0 { - return errors.Errorf("mon count %d cannot be even, must be odd to support a healthy quorum", cluster.Spec.Mon.Count) - } - if !cluster.Spec.Mon.AllowMultiplePerNode { - // Check that there are enough nodes to have a chance of starting the requested number of mons - nodes, err := cluster.context.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err == nil && len(nodes.Items) < cluster.Spec.Mon.Count { - return errors.Errorf("cannot start %d mons on %d node(s) when allowMultiplePerNode is false", cluster.Spec.Mon.Count, len(nodes.Items)) - } - } - if err := validateStretchCluster(cluster); err != nil { - return err - } - if cluster.Spec.Network.IsMultus() { - _, isPublic := cluster.Spec.Network.Selectors[config.PublicNetworkSelectorKeyName] - _, isCluster := cluster.Spec.Network.Selectors[config.ClusterNetworkSelectorKeyName] - if !isPublic && !isCluster { - return errors.New("both network selector values for public and cluster selector cannot be empty for multus provider") - } - - for _, selector := range config.NetworkSelectors { - // If one selector is empty, we continue - // This means a single interface is used both public and cluster network - if _, ok := cluster.Spec.Network.Selectors[selector]; !ok { - continue - } - - multusNamespace, nad := config.GetMultusNamespace(cluster.Spec.Network.Selectors[selector]) - if multusNamespace == "" { - multusNamespace = cluster.Namespace - } - - // Get network attachment definition - _, err := cluster.context.NetworkClient.NetworkAttachmentDefinitions(multusNamespace).Get(ctx, nad, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - return errors.Wrapf(err, "specified network attachment definition for selector %q does not exist", selector) - } - return errors.Wrapf(err, "failed to fetch network attachment definition for selector %q", selector) - } - } - } - - // Validate on-PVC cluster encryption KMS settings - if cluster.Spec.Storage.IsOnPVCEncrypted() && cluster.Spec.Security.KeyManagementService.IsEnabled() { - // Validate the KMS details - err := kms.ValidateConnectionDetails(cluster.context, cluster.Spec.Security, cluster.Namespace) - if err != nil { - return errors.Wrap(err, "failed to validate kms connection details") - } - } - - logger.Debug("cluster spec successfully validated") - return nil -} - -func validateStretchCluster(cluster *cluster) error { - if !cluster.Spec.IsStretchCluster() { - return nil - } - if len(cluster.Spec.Mon.StretchCluster.Zones) != 3 { - return errors.Errorf("expecting exactly three zones for the stretch cluster, but found %d", len(cluster.Spec.Mon.StretchCluster.Zones)) - } - if cluster.Spec.Mon.Count != 3 && cluster.Spec.Mon.Count != 5 { - return errors.Errorf("invalid number of mons %d for a stretch cluster, expecting 5 (recommended) or 3 (minimal)", 
cluster.Spec.Mon.Count) - } - arbitersFound := 0 - for _, zone := range cluster.Spec.Mon.StretchCluster.Zones { - if zone.Arbiter { - arbitersFound++ - } - if zone.Name == "" { - return errors.New("missing zone name for the stretch cluster") - } - } - if arbitersFound != 1 { - return errors.Errorf("expecting to find exactly one arbiter zone, but found %d", arbitersFound) - } - return nil -} - -func extractExitCode(err error) (int, bool) { - exitErr, ok := err.(*exec.ExitError) - if ok { - return exitErr.ExitCode(), true - } - return 0, false -} - -func (c *cluster) createCrushRoot(newRoot string) error { - args := []string{"osd", "crush", "add-bucket", newRoot, "root"} - cephCmd := client.NewCephCommand(c.context, c.ClusterInfo, args) - _, err := cephCmd.Run() - if err != nil { - // returns zero if the bucket exists already, so any error is fatal - return errors.Wrap(err, "failed to create CRUSH root") - } - - return nil -} - -func (c *cluster) replaceDefaultReplicationRule(newRoot string) error { - args := []string{"osd", "crush", "rule", "rm", "replicated_rule"} - cephCmd := client.NewCephCommand(c.context, c.ClusterInfo, args) - _, err := cephCmd.Run() - if err != nil { - if code, ok := extractExitCode(err); ok && code == int(syscall.EBUSY) { - // we do not want to delete the replicated_rule if it’s in use, - // and we also do not care much. There are two possible causes: - // - the user has created this rule with the non-default CRUSH - // root manually - // - the user is using this rule despite the rule using the default - // CRUSH root - // in both cases, we cannot do anything about it either way and - // we’ll assume that the user knows what they’re doing. - logger.Warning("replicated_rule is in use, not replaced") - return nil - } - // the error does not refer to EBUSY -> return as error - return errors.Wrap(err, "failed to remove default replicated_rule") - } - - args = []string{ - "osd", "crush", "rule", "create-replicated", - "replicated_rule", newRoot, "host", - } - cephCmd = client.NewCephCommand(c.context, c.ClusterInfo, args) - _, err = cephCmd.Run() - if err != nil { - // returns zero if the rule exists already, so any error is fatal - return errors.Wrap(err, "failed to create new default replicated_rule") - } - - return nil -} - -func (c *cluster) removeDefaultCrushRoot() error { - args := []string{"osd", "crush", "rm", "default"} - cephCmd := client.NewCephCommand(c.context, c.ClusterInfo, args) - _, err := cephCmd.Run() - if err != nil { - if code, ok := extractExitCode(err); ok { - if code == int(syscall.ENOTEMPTY) || code == int(syscall.EBUSY) { - // we do not want to delete the default node if it’s in use, - // and we also do not care much. There are two more causes here: - // - a (non-root?) CRUSH node with the default label was created - // automatically, e.g. from topology labels, and OSDs (or sub - // nodes) have been placed in there. In this case, the node - // obviously needs to be preserved. - // - the root=default CRUSH node is in use by a non-default - // CRUSH rule - // - OSDs or subnodes have been placed under the root=default - // CRUSH node - // - // in all cases, we cannot do anything about it either way and - // we’ll assume that the user knows what they’re doing. 
- logger.Debug("default is not empty or is still in use, not removed") - return nil - } - } - // the error does not refer to EBUSY or ENOTEMPTY -> return as error - return errors.Wrap(err, "failed to remove CRUSH node 'default'") - } - return nil -} - -// Remove the default root=default and replicated_rule CRUSH objects which are created by Ceph on initial startup. -// Those objects may interfere with the normal operation of the cluster. -// Note that errors which indicate that the objects are in use are ignored and the objects will continue to exist in that case. -func (c *cluster) replaceDefaultCrushMap(newRoot string) (err error) { - logger.Info("creating new CRUSH root if it does not exist") - err = c.createCrushRoot(newRoot) - if err != nil { - return errors.Wrap(err, "failed to create CRUSH root") - } - - logger.Info("replacing default replicated_rule CRUSH rule for use of non-default CRUSH root") - err = c.replaceDefaultReplicationRule(newRoot) - if err != nil { - return errors.Wrap(err, "failed to replace default rule") - } - - logger.Info("replacing default CRUSH node if applicable") - err = c.removeDefaultCrushRoot() - if err != nil { - return errors.Wrap(err, "failed to remove default CRUSH root") - } - - return nil -} - -// postMonStartupActions is a collection of actions to run once the monitors are up and running -// It gets executed right after the main mon Start() method -// Basically, it is executed between the monitors and the manager sequence -func (c *cluster) postMonStartupActions() error { - // Create CSI Kubernetes Secrets - err := csi.CreateCSISecrets(c.context, c.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to create csi kubernetes secrets") - } - - // Create crash collector Kubernetes Secret - err = crash.CreateCrashCollectorSecret(c.context, c.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to create crash collector kubernetes secret") - } - - // Enable Ceph messenger 2 protocol on Nautilus - if err := client.EnableMessenger2(c.context, c.ClusterInfo); err != nil { - return errors.Wrap(err, "failed to enable Ceph messenger version 2") - } - - crushRoot := client.GetCrushRootFromSpec(c.Spec) - if crushRoot != "default" { - // Remove the root=default and replicated_rule which are created by - // default. Note that RemoveDefaultCrushMap ignores some types of errors - // internally - if err := c.replaceDefaultCrushMap(crushRoot); err != nil { - return errors.Wrap(err, "failed to remove default CRUSH map") - } - } - - // Create cluster-wide RBD bootstrap peer token - _, err = controller.CreateBootstrapPeerSecret(c.context, c.ClusterInfo, &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: c.namespacedName.Name, Namespace: c.Namespace}}, c.ownerInfo) - if err != nil { - return errors.Wrap(err, "failed to create cluster rbd bootstrap peer token") - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/cluster_external.go b/pkg/operator/ceph/cluster/cluster_external.go deleted file mode 100644 index fbdc7fa02..000000000 --- a/pkg/operator/ceph/cluster/cluster_external.go +++ /dev/null @@ -1,253 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. -package cluster - -import ( - "context" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/crash" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/csi" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -func (c *ClusterController) configureExternalCephCluster(cluster *cluster) error { - // Make sure the spec contains all the information we need - err := validateExternalClusterSpec(cluster) - if err != nil { - return errors.Wrap(err, "failed to validate external cluster specs") - } - - opcontroller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionConnecting, v1.ConditionTrue, cephv1.ClusterConnectingReason, "Attempting to connect to an external Ceph cluster") - - // loop until we find the secret necessary to connect to the external cluster - // then populate clusterInfo - - cluster.ClusterInfo = mon.PopulateExternalClusterInfo(c.context, c.namespacedName.Namespace, cluster.ownerInfo) - cluster.ClusterInfo.SetName(c.namespacedName.Name) - - if !client.IsKeyringBase64Encoded(cluster.ClusterInfo.CephCred.Secret) { - return errors.Errorf("invalid user health checker key for user %q", cluster.ClusterInfo.CephCred.Username) - } - - // Write connection info (ceph config file and keyring) for ceph commands - if cluster.Spec.CephVersion.Image == "" { - err = mon.WriteConnectionConfig(c.context, cluster.ClusterInfo) - if err != nil { - logger.Errorf("failed to write config. attempting to continue. %v", err) - } - } - - // Validate versions (local and external) - // If no image is specified we don't perform any checks - if cluster.Spec.CephVersion.Image != "" { - _, _, err = c.detectAndValidateCephVersion(cluster) - if err != nil { - return errors.Wrap(err, "failed to detect and validate ceph version") - } - - // Write the rook-config-override configmap (used by various daemons to apply config overrides) - // If we don't do this, daemons will never start, waiting forever for this configmap to be present - // - // Only do this when doing a bit of management... 
- logger.Infof("creating %q configmap", k8sutil.ConfigOverrideName) - err = populateConfigOverrideConfigMap(c.context, c.namespacedName.Namespace, cluster.ClusterInfo.OwnerInfo) - if err != nil { - return errors.Wrap(err, "failed to populate config override config map") - } - - logger.Infof("creating %q secret", config.StoreName) - err = config.GetStore(c.context, c.namespacedName.Namespace, cluster.ClusterInfo.OwnerInfo).CreateOrUpdate(cluster.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to update the global config") - } - } - - // The cluster Identity must be established at this point - if !cluster.ClusterInfo.IsInitialized(true) { - return errors.New("the cluster identity was not established") - } - logger.Info("external cluster identity established") - - // Create CSI Secrets only if the user has provided the admin key - if cluster.ClusterInfo.CephCred.Username == client.AdminUsername { - err = csi.CreateCSISecrets(c.context, cluster.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to create csi kubernetes secrets") - } - } - - // Create CSI config map - err = csi.CreateCsiConfigMap(c.namespacedName.Namespace, c.context.Clientset, cluster.ownerInfo) - if err != nil { - return errors.Wrap(err, "failed to create csi config map") - } - - // Save CSI configmap - err = csi.SaveClusterConfig(c.context.Clientset, c.namespacedName.Namespace, cluster.ClusterInfo, c.csiConfigMutex) - if err != nil { - return errors.Wrap(err, "failed to update csi cluster config") - } - logger.Info("successfully updated csi config map") - - // Create Crash Collector Secret - // In 14.2.5 the crash daemon will read the client.crash key instead of the admin key - if !cluster.Spec.CrashCollector.Disable { - err = crash.CreateCrashCollectorSecret(c.context, cluster.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to create crash collector kubernetes secret") - } - } - - // enable monitoring if `monitoring: enabled: true` - // We need the Ceph version - if cluster.Spec.Monitoring.Enabled { - // Discover external Ceph version to detect which service monitor to inject - externalVersion, err := client.GetCephMonVersion(c.context, cluster.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get external ceph mon version") - } - cluster.ClusterInfo.CephVersion = *externalVersion - - // Populate ceph version - c.updateClusterCephVersion("", *externalVersion) - - err = c.configureExternalClusterMonitoring(c.context, cluster) - if err != nil { - return errors.Wrap(err, "failed to configure external cluster monitoring") - } - } - - // We don't update the connection status since it is done by the health go routine - return nil -} - -func purgeExternalCluster(clientset kubernetes.Interface, namespace string) { - ctx := context.TODO() - // Purge the config maps - cmsToDelete := []string{ - mon.EndpointConfigMapName, - k8sutil.ConfigOverrideName, - } - for _, cm := range cmsToDelete { - err := clientset.CoreV1().ConfigMaps(namespace).Delete(ctx, cm, metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - logger.Errorf("failed to delete config map %q. 
%v", cm, err) - } - } - - // Purge the secrets - secretsToDelete := []string{ - mon.AppName, - mon.OperatorCreds, - csi.CsiRBDNodeSecret, - csi.CsiRBDProvisionerSecret, - csi.CsiCephFSNodeSecret, - csi.CsiCephFSProvisionerSecret, - config.StoreName, - } - for _, secret := range secretsToDelete { - err := clientset.CoreV1().Secrets(namespace).Delete(ctx, secret, metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - logger.Errorf("failed to delete secret %q. %v", secret, err) - } - } -} - -func validateExternalClusterSpec(cluster *cluster) error { - if cluster.Spec.CephVersion.Image != "" { - if cluster.Spec.DataDirHostPath == "" { - return errors.New("dataDirHostPath must be specified") - } - } - - // Validate external services port - if cluster.Spec.Monitoring.Enabled { - if cluster.Spec.Monitoring.ExternalMgrPrometheusPort == 0 { - cluster.Spec.Monitoring.ExternalMgrPrometheusPort = mgr.DefaultMetricsPort - } - } - - return nil -} - -func (c *ClusterController) configureExternalClusterMonitoring(context *clusterd.Context, cluster *cluster) error { - // Initialize manager object - manager := mgr.New( - context, - cluster.ClusterInfo, - *cluster.Spec, - "", // We don't need the image since we are not running any mgr deployment - ) - - // Create external monitoring Service - service, err := manager.MakeMetricsService(opcontroller.ExternalMgrAppName, "", opcontroller.ServiceExternalMetricName) - if err != nil { - return err - } - logger.Info("creating mgr external monitoring service") - _, err = k8sutil.CreateOrUpdateService(context.Clientset, cluster.Namespace, service) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create or update mgr service") - } - logger.Info("mgr external metrics service created") - - // Configure external metrics endpoint - err = opcontroller.ConfigureExternalMetricsEndpoint(context, cluster.Spec.Monitoring, cluster.ClusterInfo, cluster.ownerInfo) - if err != nil { - return errors.Wrap(err, "failed to configure external metrics endpoint") - } - - // Deploy external ServiceMonittor - logger.Info("creating external service monitor") - // servicemonitor takes some metadata from the service for easy mapping - err = manager.EnableServiceMonitor("") - if err != nil { - logger.Errorf("failed to enable external service monitor. %v", err) - } else { - logger.Info("external service monitor created") - } - - // namespace in which the prometheusRule should be deployed - // if left empty, it will be deployed in current namespace - namespace := cluster.Spec.Monitoring.RulesNamespace - if namespace == "" { - namespace = cluster.Namespace - } - - logger.Info("creating external prometheus rule") - err = manager.DeployPrometheusRule(mgr.PrometheusExternalRuleName, namespace) - if err != nil { - logger.Errorf("failed to create external prometheus rule. %v", err) - } else { - logger.Info("external prometheus rule created") - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/cluster_external_test.go b/pkg/operator/ceph/cluster/cluster_external_test.go deleted file mode 100644 index c3a2f5e5a..000000000 --- a/pkg/operator/ceph/cluster/cluster_external_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/stretchr/testify/assert" -) - -func TestValidateExternalClusterSpec(t *testing.T) { - c := &cluster{Spec: &cephv1.ClusterSpec{}, mons: &mon.Cluster{}} - err := validateExternalClusterSpec(c) - assert.NoError(t, err) - - c.Spec.CephVersion.Image = "quay.io/ceph/ceph:v15" - err = validateExternalClusterSpec(c) - assert.Error(t, err) - - c.Spec.DataDirHostPath = "path" - err = validateExternalClusterSpec(c) - assert.NoError(t, err, err) - assert.Equal(t, uint16(0), c.Spec.Monitoring.ExternalMgrPrometheusPort) - - c.Spec.Monitoring.Enabled = true - err = validateExternalClusterSpec(c) - assert.NoError(t, err, err) - assert.Equal(t, uint16(9283), c.Spec.Monitoring.ExternalMgrPrometheusPort) - -} diff --git a/pkg/operator/ceph/cluster/cluster_test.go b/pkg/operator/ceph/cluster/cluster_test.go deleted file mode 100644 index eb94e1d18..000000000 --- a/pkg/operator/ceph/cluster/cluster_test.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. 
-package cluster - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - testop "github.com/rook/rook/pkg/operator/test" -) - -func TestPreClusterStartValidation(t *testing.T) { - type args struct { - cluster *cluster - } - tests := []struct { - name string - args args - wantErr bool - }{ - {"no settings", args{&cluster{Spec: &cephv1.ClusterSpec{}, context: &clusterd.Context{Clientset: testop.New(t, 3)}}}, false}, - {"even mons", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 2}}}}, true}, - {"missing stretch zones", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ - {Name: "a"}, - }}}}}}, true}, - {"missing arbiter", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ - {Name: "a"}, - {Name: "b"}, - {Name: "c"}, - }}}}}}, true}, - {"missing zone name", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ - {Arbiter: true}, - {Name: "b"}, - {Name: "c"}, - }}}}}}, true}, - {"valid stretch cluster", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 3, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ - {Name: "a", Arbiter: true}, - {Name: "b"}, - {Name: "c"}, - }}}}}}, false}, - {"not enough stretch nodes", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 5, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ - {Name: "a", Arbiter: true}, - {Name: "b"}, - {Name: "c"}, - }}}}}}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := preClusterStartValidation(tt.args.cluster); (err != nil) != tt.wantErr { - t.Errorf("ClusterController.preClusterStartValidation() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/pkg/operator/ceph/cluster/controller.go b/pkg/operator/ceph/cluster/controller.go deleted file mode 100644 index bf41cc1f8..000000000 --- a/pkg/operator/ceph/cluster/controller.go +++ /dev/null @@ -1,590 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. 
-package cluster - -import ( - "context" - "fmt" - "os" - "strings" - "sync" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/csi" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - controllerName = "ceph-cluster-controller" - detectCephVersionTimeout = 15 * time.Minute -) - -const ( - // DefaultClusterName states the default name of the rook-cluster if not provided. - DefaultClusterName = "rook-ceph" - disableHotplugEnv = "ROOK_DISABLE_DEVICE_HOTPLUG" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - // disallowedHostDirectories directories which are not allowed to be used - disallowedHostDirectories = []string{"/etc/ceph", "/rook", "/var/log/ceph"} -) - -// List of object resources to watch by the controller -var objectsToWatch = []client.Object{ - &appsv1.Deployment{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.String()}}, - &corev1.Service{TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: corev1.SchemeGroupVersion.String()}}, - &corev1.Secret{TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: corev1.SchemeGroupVersion.String()}}, - &corev1.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: corev1.SchemeGroupVersion.String()}}, -} - -// ControllerTypeMeta Sets the type meta for the controller main object -var ControllerTypeMeta = metav1.TypeMeta{ - Kind: opcontroller.ClusterResource.Kind, - APIVersion: opcontroller.ClusterResource.APIVersion, -} - -// ClusterController controls an instance of a Rook cluster -type ClusterController struct { - context *clusterd.Context - volumeAttachment attachment.Attachment - rookImage string - clusterMap map[string]*cluster - operatorConfigCallbacks []func() error - addClusterCallbacks []func() error - csiConfigMutex *sync.Mutex - osdChecker *osd.OSDHealthMonitor - client client.Client - namespacedName types.NamespacedName - recorder *k8sutil.EventReporter -} - -// ReconcileCephCluster reconciles a CephFilesystem object -type ReconcileCephCluster struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - clusterController *ClusterController -} - -// Add creates a new CephCluster Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
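// A minimal, hypothetical sketch of how a controller package like this one is
// consumed: the Add function above registers the reconciler and its watches
// with a controller-runtime manager, and nothing reconciles until the manager
// is started. The cluster.Add call is left commented out because it needs the
// operator's clusterd.Context and ClusterController; the exact wiring in
// Rook's operator entrypoint may differ.
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}

	// if err := cluster.Add(mgr, clusterdContext, clusterController); err != nil { panic(err) }

	// Start launches every registered controller and blocks until the signal
	// handler's context is cancelled.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}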
-func Add(mgr manager.Manager, ctx *clusterd.Context, clusterController *ClusterController) error { - return add(mgr, newReconciler(mgr, ctx, clusterController), ctx) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, ctx *clusterd.Context, clusterController *ClusterController) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - - // add "rook-" prefix to the controller name to make sure it is clear to all reading the events - // that they are coming from Rook. The controller name already has context that it is for Ceph - // and from the cluster controller. - clusterController.recorder = k8sutil.NewEventReporter(mgr.GetEventRecorderFor("rook-" + controllerName)) - - return &ReconcileCephCluster{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: ctx, - clusterController: clusterController, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler, context *clusterd.Context) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephCluster CR object - err = c.Watch( - &source.Kind{ - Type: &cephv1.CephCluster{ - TypeMeta: ControllerTypeMeta, - }, - }, - &handler.EnqueueRequestForObject{}, - watchControllerPredicate(context)) - if err != nil { - return err - } - - // Watch all other resources of the Ceph Cluster - for _, t := range objectsToWatch { - err = c.Watch( - &source.Kind{ - Type: t, - }, - &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephCluster{}, - }, - opcontroller.WatchPredicateForNonCRDObject(&cephv1.CephCluster{TypeMeta: ControllerTypeMeta}, mgr.GetScheme())) - if err != nil { - return err - } - } - - // Build Handler function to return the list of ceph clusters - // This is used by the watchers below - handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephClusterList{}, mgr.GetScheme()) - if err != nil { - return err - } - - // Watch for nodes additions and updates - err = c.Watch( - &source.Kind{ - Type: &corev1.Node{ - TypeMeta: metav1.TypeMeta{ - Kind: "Node", - APIVersion: corev1.SchemeGroupVersion.String(), - }, - }, - }, - handler.EnqueueRequestsFromMapFunc(handlerFunc), - predicateForNodeWatcher(mgr.GetClient(), context)) - if err != nil { - return err - } - - // Watch for changes on the hotplug config map - // TODO: to improve, can we run this against the operator namespace only? 
- disableVal := os.Getenv(disableHotplugEnv) - if disableVal != "true" { - logger.Info("enabling hotplug orchestration") - err = c.Watch( - &source.Kind{ - Type: &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: corev1.SchemeGroupVersion.String(), - }, - }, - }, - handler.EnqueueRequestsFromMapFunc(handlerFunc), - predicateForHotPlugCMWatcher(mgr.GetClient())) - if err != nil { - return err - } - } else { - logger.Info("hotplug orchestration disabled") - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephCluster object and makes changes based on the state read -// and what is in the cephCluster.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileCephCluster) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, cephCluster, err := r.reconcile(request) - if err != nil && strings.Contains(err.Error(), opcontroller.CancellingOrchestrationMessage) { - logger.Infof("Cluster update requested. %s", opcontroller.CancellingOrchestrationMessage) - return opcontroller.ImmediateRetryResultNoBackoff, nil - } - - return reporting.ReportReconcileResult(logger, r.clusterController.recorder, - cephCluster, reconcileResponse, err) -} - -func (r *ReconcileCephCluster) reconcile(request reconcile.Request) (reconcile.Result, *cephv1.CephCluster, error) { - // Pass the client context to the ClusterController - r.clusterController.client = r.client - - // Used by functions not part of the ClusterController struct but are given the context to execute actions - r.clusterController.context.Client = r.client - - // Pass object name and namespace - r.clusterController.namespacedName = request.NamespacedName - - // Fetch the cephCluster instance - cephCluster := &cephv1.CephCluster{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephCluster) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("cephCluster resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, cephCluster, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, cephCluster, errors.Wrap(err, "failed to get cephCluster") - } - - // Set a finalizer so we can do cleanup before the object goes away - err = opcontroller.AddFinalizerIfNotPresent(r.client, cephCluster) - if err != nil { - return reconcile.Result{}, cephCluster, errors.Wrap(err, "failed to add finalizer") - } - - // DELETE: the CR was deleted - if !cephCluster.GetDeletionTimestamp().IsZero() { - return r.reconcileDelete(cephCluster) - } - - // Do reconcile here! 
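// A generic sketch of the finalizer flow that the reconcile logic above
// implements with Rook's own opcontroller helpers; controller-runtime's
// controllerutil package offers equivalent primitives. The finalizer name and
// cleanup step here are placeholders, not Rook's.
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const exampleFinalizer = "example.io/block-until-cleaned-up"

func reconcileWithFinalizer(ctx context.Context, c client.Client, obj client.Object) error {
	if obj.GetDeletionTimestamp().IsZero() {
		// Live object: make sure the finalizer is present so a later delete
		// waits for cleanup before the object disappears.
		if !controllerutil.ContainsFinalizer(obj, exampleFinalizer) {
			controllerutil.AddFinalizer(obj, exampleFinalizer)
			return c.Update(ctx, obj)
		}
		return nil // the normal reconcile would continue here
	}

	// Deletion requested: run cleanup, then release the object.
	// ... cleanup work ...
	controllerutil.RemoveFinalizer(obj, exampleFinalizer)
	return c.Update(ctx, obj)
}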
- ownerInfo := k8sutil.NewOwnerInfo(cephCluster, r.scheme) - if err := r.clusterController.reconcileCephCluster(cephCluster, ownerInfo); err != nil { - return reconcile.Result{}, cephCluster, errors.Wrapf(err, "failed to reconcile cluster %q", cephCluster.Name) - } - - // Return and do not requeue - return reconcile.Result{}, cephCluster, nil -} - -func (r *ReconcileCephCluster) reconcileDelete(cephCluster *cephv1.CephCluster) (reconcile.Result, *cephv1.CephCluster, error) { - nsName := r.clusterController.namespacedName - var err error - - // Set the deleting status - opcontroller.UpdateClusterCondition(r.context, cephCluster, nsName, - cephv1.ConditionDeleting, corev1.ConditionTrue, cephv1.ClusterDeletingReason, "Deleting the CephCluster", - true /* keep all other conditions to be safe */) - - deps, err := CephClusterDependents(r.context, cephCluster.Namespace) - if err != nil { - return reconcile.Result{}, cephCluster, err - } - if !deps.Empty() { - err := reporting.ReportDeletionBlockedDueToDependents(logger, r.client, cephCluster, deps) - return opcontroller.WaitForRequeueIfFinalizerBlocked, cephCluster, err - } - reporting.ReportDeletionNotBlockedDueToDependents(logger, r.client, r.clusterController.recorder, cephCluster) - - doCleanup := true - - // Start cluster clean up only if cleanupPolicy is applied to the ceph cluster - stopCleanupCh := make(chan struct{}) - if cephCluster.Spec.CleanupPolicy.HasDataDirCleanPolicy() && !cephCluster.Spec.External.Enable { - monSecret, clusterFSID, err := r.clusterController.getCleanUpDetails(cephCluster.Namespace) - if err != nil { - logger.Warningf("failed to get mon secret. Skip cluster cleanup and remove finalizer. %v", err) - doCleanup = false - } - - if doCleanup { - cephHosts, err := r.clusterController.getCephHosts(cephCluster.Namespace) - if err != nil { - close(stopCleanupCh) - return reconcile.Result{}, cephCluster, errors.Wrapf(err, "failed to find valid ceph hosts in the cluster %q", cephCluster.Namespace) - } - // Go will garbage collect the stopCleanupCh if it is left open once the cluster cleanup - // goroutine is no longer running (i.e., referencing the channel) - go r.clusterController.startClusterCleanUp(stopCleanupCh, cephCluster, cephHosts, monSecret, clusterFSID) - } else { - // stop channel not needed if the cleanup goroutine isn't started - close(stopCleanupCh) - } - } - - if doCleanup { - // Run delete sequence - response, err := r.clusterController.requestClusterDelete(cephCluster) - if err != nil { - // If the cluster cannot be deleted, requeue the request for deletion to see if the conditions - // will eventually be satisfied such as the volumes being removed - close(stopCleanupCh) - return response, cephCluster, errors.Wrapf(err, "failed to clean up CephCluster %q", nsName.String()) - } - } - - // Remove finalizer - err = removeFinalizer(r.client, nsName) - if err != nil { - return reconcile.Result{}, cephCluster, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. 
- return reconcile.Result{}, cephCluster, nil -} - -// NewClusterController create controller for watching cluster custom resources created -func NewClusterController(context *clusterd.Context, rookImage string, volumeAttachment attachment.Attachment, operatorConfigCallbacks []func() error, addClusterCallbacks []func() error) *ClusterController { - return &ClusterController{ - context: context, - volumeAttachment: volumeAttachment, - rookImage: rookImage, - clusterMap: make(map[string]*cluster), - operatorConfigCallbacks: operatorConfigCallbacks, - addClusterCallbacks: addClusterCallbacks, - csiConfigMutex: &sync.Mutex{}, - } -} - -func (c *ClusterController) reconcileCephCluster(clusterObj *cephv1.CephCluster, ownerInfo *k8sutil.OwnerInfo) error { - if clusterObj.Spec.CleanupPolicy.HasDataDirCleanPolicy() { - logger.Infof("skipping orchestration for cluster object %q in namespace %q because its cleanup policy is set", clusterObj.Name, clusterObj.Namespace) - return nil - } - - cluster, ok := c.clusterMap[clusterObj.Namespace] - if !ok { - // It's a new cluster so let's populate the struct - cluster = newCluster(clusterObj, c.context, c.csiConfigMutex, ownerInfo) - } - cluster.namespacedName = c.namespacedName - - // Pass down the client to interact with Kubernetes objects - // This will be used later down by spec code to create objects like deployment, services etc - cluster.context.Client = c.client - - // Set the spec - cluster.Spec = &clusterObj.Spec - - // Note that this lock is held through the callback process, as this creates CSI resources, but we must lock in - // this scope as the clusterMap is authoritative on cluster count and thus involved in the check for CSI resource - // deletion. If we ever add additional callback functions, we should tighten this lock. - c.csiConfigMutex.Lock() - c.clusterMap[cluster.Namespace] = cluster - logger.Infof("reconciling ceph cluster in namespace %q", cluster.Namespace) - - for _, callback := range c.addClusterCallbacks { - if err := callback(); err != nil { - logger.Errorf("%v", err) - } - } - c.csiConfigMutex.Unlock() - - // Start the main ceph cluster orchestration - return c.initializeCluster(cluster) -} - -func (c *ClusterController) requestClusterDelete(cluster *cephv1.CephCluster) (reconcile.Result, error) { - nsName := fmt.Sprintf("%s/%s", cluster.Namespace, cluster.Name) - - if existing, ok := c.clusterMap[cluster.Namespace]; ok && existing.namespacedName.Name != cluster.Name { - logger.Errorf("skipping deletion of CephCluster %q. CephCluster CR %q already exists in this namespace. 
only one cluster cr per namespace is supported.", - nsName, existing.namespacedName.Name) - return reconcile.Result{}, nil // do not requeue the delete - } - - logger.Infof("cleaning up CephCluster %q", nsName) - - if cluster, ok := c.clusterMap[cluster.Namespace]; ok { - // if not already stopped, stop clientcontroller and bucketController - if !cluster.closedStopCh { - close(cluster.stopCh) - cluster.closedStopCh = true - } - - // close the goroutines watching the health of the cluster (mons, osds, ceph status) - for _, daemon := range monitorDaemonList { - if monitoring, ok := cluster.monitoringChannels[daemon]; ok && monitoring.monitoringRunning { - close(cluster.monitoringChannels[daemon].stopChan) - cluster.monitoringChannels[daemon].monitoringRunning = false - } - } - } - - if cluster.Spec.CleanupPolicy.AllowUninstallWithVolumes { - logger.Info("skipping check for existing PVs as allowUninstallWithVolumes is set to true") - } else { - err := c.checkIfVolumesExist(cluster) - if err != nil { - return opcontroller.WaitForRequeueIfFinalizerBlocked, errors.Wrapf(err, "failed to check if volumes exist for CephCluster in namespace %q", cluster.Namespace) - } - } - - if cluster.Spec.External.Enable { - purgeExternalCluster(c.context.Clientset, cluster.Namespace) - } else if cluster.Spec.Storage.IsOnPVCEncrypted() && cluster.Spec.Security.KeyManagementService.IsEnabled() { - // If the StorageClass retain policy of an encrypted cluster with KMS is Delete we also delete the keys - // Delete keys from KMS - err := c.deleteOSDEncryptionKeyFromKMS(cluster) - if err != nil { - logger.Errorf("failed to delete osd encryption keys for CephCluster %q from kms; deletion will continue. %v", nsName, err) - } - } - - if cluster, ok := c.clusterMap[cluster.Namespace]; ok { - delete(c.clusterMap, cluster.Namespace) - } - - return reconcile.Result{}, nil -} - -func (c *ClusterController) checkIfVolumesExist(cluster *cephv1.CephCluster) error { - if csi.CSIEnabled() { - err := c.csiVolumesAllowForDeletion(cluster) - if err != nil { - return err - } - } - if !opcontroller.FlexDriverEnabled(c.context) { - logger.Debugf("Flex driver disabled, skipping check for volume attachments for cluster %q", cluster.Namespace) - return nil - } - return c.flexVolumesAllowForDeletion(cluster) -} - -func (c *ClusterController) flexVolumesAllowForDeletion(cluster *cephv1.CephCluster) error { - operatorNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - vols, err := c.volumeAttachment.List(operatorNamespace) - if err != nil { - return errors.Wrapf(err, "failed to get volume attachments for operator namespace %q", operatorNamespace) - } - - // find volume attachments in the deleted cluster - attachmentsExist := false -AttachmentLoop: - for _, vol := range vols.Items { - for _, a := range vol.Attachments { - if a.ClusterName == cluster.Namespace { - // there is still an outstanding volume attachment in the cluster that is being deleted. 
- attachmentsExist = true - break AttachmentLoop - } - } - } - - if !attachmentsExist { - logger.Infof("no volume attachments for cluster %q to clean up.", cluster.Namespace) - return nil - } - - return errors.Errorf("waiting for volume attachments in cluster %q to be cleaned up.", cluster.Namespace) -} - -func (c *ClusterController) csiVolumesAllowForDeletion(cluster *cephv1.CephCluster) error { - drivers := []string{csi.CephFSDriverName, csi.RBDDriverName} - - logger.Infof("checking any PVC created by drivers %q and %q with clusterID %q", csi.CephFSDriverName, csi.RBDDriverName, cluster.Namespace) - // check any PV is created in this cluster - attachmentsExist, err := c.checkPVPresentInCluster(drivers, cluster.Namespace) - if err != nil { - return errors.Wrapf(err, "failed to list PersistentVolumes") - } - // no PVC created in this cluster - if !attachmentsExist { - logger.Infof("no volume attachments for cluster %q", cluster.Namespace) - return nil - } - - return errors.Errorf("waiting for csi volume attachments in cluster %q to be cleaned up", cluster.Namespace) -} - -func (c *ClusterController) checkPVPresentInCluster(drivers []string, clusterID string) (bool, error) { - ctx := context.TODO() - pv, err := c.context.Clientset.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, errors.Wrapf(err, "failed to list PV") - } - - for _, p := range pv.Items { - if p.Spec.CSI == nil { - logger.Errorf("Spec.CSI is nil for PV %q", p.Name) - continue - } - if p.Spec.CSI.VolumeAttributes["clusterID"] == clusterID { - //check PV is created by drivers deployed by rook - for _, d := range drivers { - if d == p.Spec.CSI.Driver { - return true, nil - } - } - - } - } - return false, nil -} - -// removeFinalizer removes a finalizer -func removeFinalizer(client client.Client, name types.NamespacedName) error { - cephCluster := &cephv1.CephCluster{} - err := client.Get(context.TODO(), name, cephCluster) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephCluster resource not found. 
Ignoring since object must be deleted.") - return nil - } - return errors.Wrapf(err, "failed to retrieve ceph cluster %q to remove finalizer", name.Name) - } - - err = opcontroller.RemoveFinalizer(client, cephCluster) - if err != nil { - return errors.Wrap(err, "failed to remove finalizer") - } - - return nil -} - -func (c *ClusterController) deleteOSDEncryptionKeyFromKMS(currentCluster *cephv1.CephCluster) error { - // If the operator was stopped and we enter this code, the map is empty - if _, ok := c.clusterMap[currentCluster.Namespace]; !ok { - c.clusterMap[currentCluster.Namespace] = &cluster{ClusterInfo: &cephclient.ClusterInfo{Namespace: currentCluster.Namespace}} - } - - // Fetch PVCs - osdPVCs, _, err := osd.GetExistingPVCs(c.context, currentCluster.Namespace) - if err != nil { - return errors.Wrap(err, "failed to list osd pvc") - } - - // Initialize the KMS code - kmsConfig := kms.NewConfig(c.context, ¤tCluster.Spec, c.clusterMap[currentCluster.Namespace].ClusterInfo) - - // If token auth is used by the KMS we set it as an env variable - if currentCluster.Spec.Security.KeyManagementService.IsTokenAuthEnabled() { - err := kms.SetTokenToEnvVar(c.context, currentCluster.Spec.Security.KeyManagementService.TokenSecretName, kmsConfig.Provider, currentCluster.Namespace) - if err != nil { - return errors.Wrapf(err, "failed to fetch kms token secret %q", currentCluster.Spec.Security.KeyManagementService.TokenSecretName) - } - } - - // Delete each PV KEK - for _, osdPVC := range osdPVCs { - // Generate and store the encrypted key in whatever KMS is configured - err = kmsConfig.DeleteSecret(osdPVC.Name) - if err != nil { - logger.Errorf("failed to delete secret. %v", err) - continue - } - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/controller_test.go b/pkg/operator/ceph/cluster/controller_test.go deleted file mode 100644 index 9cbe1b049..000000000 --- a/pkg/operator/ceph/cluster/controller_test.go +++ /dev/null @@ -1,270 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
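// An illustrative, condensed form of the PV scan in checkPVPresentInCluster
// earlier in this file: a PersistentVolume blocks teardown when it was
// provisioned by one of the cluster's CSI drivers and carries that cluster's
// clusterID attribute. Driver names would be passed in by the caller.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func csiVolumesRemain(ctx context.Context, cs kubernetes.Interface, clusterID string, drivers []string) (bool, error) {
	pvs, err := cs.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return false, err
	}
	for _, pv := range pvs.Items {
		if pv.Spec.CSI == nil || pv.Spec.CSI.VolumeAttributes["clusterID"] != clusterID {
			continue
		}
		for _, d := range drivers {
			if pv.Spec.CSI.Driver == d {
				return true, nil // at least one CSI volume still belongs to this cluster
			}
		}
	}
	return false, nil
}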
-*/ - -package cluster - -import ( - "context" - "os" - "testing" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - "github.com/tevino/abool" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - dynamicfake "k8s.io/client-go/dynamic/fake" - k8sfake "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/record" - clientfake "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func TestReconcile_DeleteCephCluster(t *testing.T) { - ctx := context.TODO() - cephNs := "rook-ceph" - clusterName := "my-cluster" - nsName := types.NamespacedName{ - Name: clusterName, - Namespace: cephNs, - } - - fakeCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: cephNs, - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - }, - } - - fakePool := &cephv1.CephBlockPool{ - TypeMeta: metav1.TypeMeta{ - Kind: "CephBlockPool", - APIVersion: "ceph.rook.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-block-pool", - Namespace: cephNs, - }, - } - - // create a Rook-Ceph scheme to use for our tests - scheme := runtime.NewScheme() - assert.NoError(t, cephv1.AddToScheme(scheme)) - - t.Run("deletion blocked while dependencies exist", func(t *testing.T) { - // set up clusterd.Context - clusterdCtx := &clusterd.Context{ - Clientset: k8sfake.NewSimpleClientset(), - // reconcile looks for fake dependencies in the dynamic clientset - DynamicClientset: dynamicfake.NewSimpleDynamicClient(scheme, fakePool), - RequestCancelOrchestration: abool.New(), - } - - // set up ClusterController - volumeAttachmentController := &attachment.MockAttachment{ - MockList: func(namespace string) (*rookalpha.VolumeList, error) { - t.Log("test vol attach list") - return &rookalpha.VolumeList{Items: []rookalpha.Volume{}}, nil - }, - } - operatorConfigCallbacks := []func() error{ - func() error { - t.Log("test op config callback") - return nil - }, - } - addCallbacks := []func() error{ - func() error { - t.Log("test success callback") - return nil - }, - } - - // create the cluster controller and tell it that the cluster has been deleted - controller := NewClusterController(clusterdCtx, "", volumeAttachmentController, operatorConfigCallbacks, addCallbacks) - fakeRecorder := record.NewFakeRecorder(5) - controller.recorder = k8sutil.NewEventReporter(fakeRecorder) - - // Create a fake client to mock API calls - // Make sure it has the fake CephCluster that is to be deleted in it - client := clientfake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(fakeCluster).Build() - - // Create a ReconcileCephClient object with the scheme and fake client. 
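// A small sketch of the fake-recorder technique set up in the test above:
// record.FakeRecorder buffers emitted events as formatted strings on a channel
// that the test can drain and assert against. The reason and message below are
// placeholders.
package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	rec := record.NewFakeRecorder(5)
	rec.Event(&corev1.Pod{}, corev1.EventTypeWarning, "ReconcileFailed", "deletion is blocked by dependents")

	ev := <-rec.Events // e.g. "Warning ReconcileFailed deletion is blocked by dependents"
	fmt.Println(strings.Contains(ev, "ReconcileFailed"))
}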
- reconcileCephCluster := &ReconcileCephCluster{ - client: client, - scheme: scheme, - context: clusterdCtx, - clusterController: controller, - } - - req := reconcile.Request{NamespacedName: nsName} - - resp, err := reconcileCephCluster.Reconcile(ctx, req) - assert.NoError(t, err) - assert.NotZero(t, resp.RequeueAfter) - event := <-fakeRecorder.Events - assert.Contains(t, event, "CephBlockPools") - assert.Contains(t, event, "my-block-pool") - - blockedCluster := &cephv1.CephCluster{} - err = client.Get(ctx, nsName, blockedCluster) - assert.NoError(t, err) - status := blockedCluster.Status - assert.Equal(t, cephv1.ConditionDeleting, status.Phase) - assert.Equal(t, cephv1.ClusterState(cephv1.ConditionDeleting), status.State) - assert.Equal(t, corev1.ConditionTrue, cephv1.FindStatusCondition(status.Conditions, cephv1.ConditionDeleting).Status) - assert.Equal(t, corev1.ConditionTrue, cephv1.FindStatusCondition(status.Conditions, cephv1.ConditionDeletionIsBlocked).Status) - - // delete blocking dependency - gvr := cephv1.SchemeGroupVersion.WithResource("cephblockpools") - err = clusterdCtx.DynamicClientset.Resource(gvr).Namespace(cephNs).Delete(ctx, "my-block-pool", metav1.DeleteOptions{}) - assert.NoError(t, err) - - resp, err = reconcileCephCluster.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, resp.IsZero()) - event = <-fakeRecorder.Events - assert.Contains(t, event, "Deleting") - - unblockedCluster := &cephv1.CephCluster{} - err = client.Get(ctx, nsName, unblockedCluster) - assert.Error(t, err) - assert.True(t, kerrors.IsNotFound(err)) - }) -} - -func Test_checkIfVolumesExist(t *testing.T) { - t.Run("flexvolume enabled", func(t *testing.T) { - nodeName := "node841" - clusterName := "cluster684" - pvName := "pvc-540" - rookSystemNamespace := "rook-system-6413" - - os.Setenv("ROOK_ENABLE_FLEX_DRIVER", "true") - os.Setenv(k8sutil.PodNamespaceEnvVar, rookSystemNamespace) - defer os.Unsetenv("ROOK_ENABLE_FLEX_DRIVER") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - context := &clusterd.Context{ - Clientset: testop.New(t, 3), - } - listCount := 0 - volumeAttachmentController := &attachment.MockAttachment{ - MockList: func(namespace string) (*rookalpha.VolumeList, error) { - listCount++ - if listCount == 1 { - // first listing returns an existing volume attachment, so the controller should wait - return &rookalpha.VolumeList{ - Items: []rookalpha.Volume{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: pvName, - Namespace: rookSystemNamespace, - }, - Attachments: []rookalpha.Attachment{ - { - Node: nodeName, - ClusterName: clusterName, - }, - }, - }, - }, - }, nil - } - - // subsequent listings should return no volume attachments, meaning that they have all - // been cleaned up and the controller can move on. 
- return &rookalpha.VolumeList{Items: []rookalpha.Volume{}}, nil - - }, - } - operatorConfigCallbacks := []func() error{ - func() error { - logger.Infof("test success callback") - return nil - }, - } - addCallbacks := []func() error{ - func() error { - logger.Infof("test success callback") - return nil - }, - } - // create the cluster controller and tell it that the cluster has been deleted - controller := NewClusterController(context, "", volumeAttachmentController, operatorConfigCallbacks, addCallbacks) - clusterToDelete := &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Namespace: clusterName}} - - // The test returns a volume on the first call - assert.Error(t, controller.checkIfVolumesExist(clusterToDelete)) - - // The test does not return volumes on the second call - assert.NoError(t, controller.checkIfVolumesExist(clusterToDelete)) - }) - - t.Run("flexvolume disabled (CSI)", func(t *testing.T) { - clusterName := "cluster684" - rookSystemNamespace := "rook-system-6413" - - os.Setenv("ROOK_ENABLE_FLEX_DRIVER", "false") - os.Setenv(k8sutil.PodNamespaceEnvVar, rookSystemNamespace) - defer os.Unsetenv("ROOK_ENABLE_FLEX_DRIVER") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - context := &clusterd.Context{ - Clientset: testop.New(t, 3), - } - listCount := 0 - volumeAttachmentController := &attachment.MockAttachment{ - MockList: func(namespace string) (*rookalpha.VolumeList, error) { - listCount++ - return &rookalpha.VolumeList{Items: []rookalpha.Volume{}}, nil - - }, - } - operatorConfigCallbacks := []func() error{ - func() error { - logger.Infof("test success callback") - return nil - }, - } - addCallbacks := []func() error{ - func() error { - logger.Infof("test success callback") - os.Setenv("ROOK_ENABLE_FLEX_DRIVER", "true") - os.Setenv(k8sutil.PodNamespaceEnvVar, rookSystemNamespace) - defer os.Unsetenv("ROOK_ENABLE_FLEX_DRIVER") - return nil - }, - } - // create the cluster controller and tell it that the cluster has been deleted - controller := NewClusterController(context, "", volumeAttachmentController, operatorConfigCallbacks, addCallbacks) - clusterToDelete := &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Namespace: clusterName}} - assert.NoError(t, controller.checkIfVolumesExist(clusterToDelete)) - - // Ensure that the listing of volume attachments was never called. - assert.Equal(t, 0, listCount) - }) -} diff --git a/pkg/operator/ceph/cluster/crash/add.go b/pkg/operator/ceph/cluster/crash/add.go deleted file mode 100644 index 40a755c65..000000000 --- a/pkg/operator/ceph/cluster/crash/add.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
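// Since Go 1.17, t.Setenv can stand in for the os.Setenv / defer os.Unsetenv
// pairs used in the test above; the previous value is restored automatically
// when the test (or subtest) finishes. Sketch only; the second variable name
// stands in for k8sutil.PodNamespaceEnvVar.
package example

import "testing"

func TestVolumesCheckWithFlexDisabled(t *testing.T) {
	t.Setenv("ROOK_ENABLE_FLEX_DRIVER", "false")
	t.Setenv("POD_NAMESPACE", "rook-system-6413")
	// ... build the ClusterController and call checkIfVolumesExist here ...
}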
-*/ - -package crash - -import ( - "reflect" - "strings" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - controllerName = "ceph-crashcollector-controller" - // AppName is the value to the "app" label for the ceph-crash pods - AppName = "rook-ceph-crashcollector" - prunerName = "rook-ceph-crashcollector-pruner" - // NodeNameLabel is a node name label - NodeNameLabel = "node_name" -) - -// Add adds a new Controller based on nodedrain.ReconcileNode and registers the relevant watches and handlers -func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - return &ReconcileNode{ - client: mgr.GetClient(), - scheme: mgr.GetScheme(), - context: context, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return errors.Wrapf(err, "failed to create a new %q", controllerName) - } - logger.Info("successfully started") - - // Watch for changes to the nodes - specChangePredicate := predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - nodeOld, ok := e.ObjectOld.DeepCopyObject().(*corev1.Node) - if !ok { - return false - } - nodeNew, ok := e.ObjectNew.DeepCopyObject().(*corev1.Node) - if !ok { - return false - } - return !reflect.DeepEqual(nodeOld.Spec, nodeNew.Spec) - }, - } - logger.Debugf("watch for changes to the nodes") - err = c.Watch(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}, specChangePredicate) - if err != nil { - return errors.Wrap(err, "failed to watch for node changes") - } - - // Watch for changes to the ceph-crash deployments - logger.Debugf("watch for changes to the ceph-crash deployments") - err = c.Watch( - &source.Kind{Type: &appsv1.Deployment{}}, - handler.EnqueueRequestsFromMapFunc(handler.MapFunc(func(obj client.Object) []reconcile.Request { - deployment, ok := obj.(*appsv1.Deployment) - if !ok { - return []reconcile.Request{} - } - labels := deployment.GetLabels() - appName, ok := labels[k8sutil.AppAttr] - if !ok || appName != AppName { - return []reconcile.Request{} - } - nodeName, ok := deployment.Spec.Template.ObjectMeta.Labels[NodeNameLabel] - if !ok { - return []reconcile.Request{} - } - req := reconcile.Request{NamespacedName: types.NamespacedName{Name: nodeName}} - return []reconcile.Request{req} - }), - ), - ) - if err != nil { - return errors.Wrap(err, "failed to watch for changes on the ceph-crash deployment") - } - - // Watch for changes to the ceph pod nodename and enqueue their nodes - logger.Debugf("watch for changes to the ceph pod nodename and enqueue their nodes") - err = c.Watch( - &source.Kind{Type: &corev1.Pod{}}, - handler.EnqueueRequestsFromMapFunc(handler.MapFunc(func(obj client.Object) []reconcile.Request { - pod, ok := 
obj.(*corev1.Pod) - if !ok { - return []reconcile.Request{} - } - nodeName := pod.Spec.NodeName - if nodeName == "" { - return []reconcile.Request{} - } - if isCephPod(pod.Labels, pod.Name) { - req := reconcile.Request{NamespacedName: types.NamespacedName{Name: nodeName}} - return []reconcile.Request{req} - } - return []reconcile.Request{} - }), - ), - // only enqueue the update event if the pod moved nodes - predicate.Funcs{ - UpdateFunc: func(event event.UpdateEvent) bool { - oldPod, ok := event.ObjectOld.(*corev1.Pod) - if !ok { - return false - } - newPod, ok := event.ObjectNew.(*corev1.Pod) - if !ok { - return false - } - // only enqueue if the nodename has changed - if oldPod.Spec.NodeName == newPod.Spec.NodeName { - return false - } - return true - }, - }, - ) - if err != nil { - return errors.Wrap(err, "failed to watch for changes on the ceph pod nodename and enqueue their nodes") - } - - return nil -} - -func isCephPod(labels map[string]string, podName string) bool { - _, ok := labels["rook_cluster"] - // canary pods for monitors might stick around during startup - // at that time, the initial monitors haven't been deployed yet. - // If we don't invalidate canary pods, - // the crash collector pod will start and environment variable like 'ROOK_CEPH_MON_HOST' - // will be empty since the monitors don't exist yet - isCanaryPod := strings.Contains(podName, "-canary-") - if ok && !isCanaryPod { - logger.Debugf("%q is a ceph pod!", podName) - return true - } - - return false -} diff --git a/pkg/operator/ceph/cluster/crash/add_test.go b/pkg/operator/ceph/cluster/crash/add_test.go deleted file mode 100644 index 2c2bb6725..000000000 --- a/pkg/operator/ceph/cluster/crash/add_test.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package crash - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIsCephPod(t *testing.T) { - labels := make(map[string]string) - - labels["foo"] = "bar" - podName := "ceph" - b := isCephPod(labels, podName) - assert.False(t, b) - - // Label is correct but this is a canary pod, this is not valid! - podName = "rook-ceph-mon-b-canary-664f5bf8cd-697hh" - labels["rook_cluster"] = "rook-ceph" - b = isCephPod(labels, podName) - assert.False(t, b) - - // Label is correct and this is not a canary pod - podName = "rook-ceph-mon" - b = isCephPod(labels, podName) - assert.True(t, b) -} diff --git a/pkg/operator/ceph/cluster/crash/crash.go b/pkg/operator/ceph/cluster/crash/crash.go deleted file mode 100644 index 8ff255ac4..000000000 --- a/pkg/operator/ceph/cluster/crash/crash.go +++ /dev/null @@ -1,296 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
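// An illustrative, generic form of the watch pattern used in add() above: a
// secondary object (a Pod) is mapped to a reconcile request keyed by the node
// it runs on, and a predicate filters updates down to the ones that matter
// (the pod moved to another node). Controller wiring is omitted.
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// Map every Pod event to a request named after the node the pod runs on.
var podToNode = handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
	pod, ok := obj.(*corev1.Pod)
	if !ok || pod.Spec.NodeName == "" {
		return nil
	}
	return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: pod.Spec.NodeName}}}
})

// Only let an update through when the pod actually changed nodes.
var podMovedNodes = predicate.Funcs{
	UpdateFunc: func(e event.UpdateEvent) bool {
		oldPod, okOld := e.ObjectOld.(*corev1.Pod)
		newPod, okNew := e.ObjectNew.(*corev1.Pod)
		return okOld && okNew && oldPod.Spec.NodeName != newPod.Spec.NodeName
	},
}

// Both would then be passed to c.Watch(&source.Kind{Type: &corev1.Pod{}}, podToNode, podMovedNodes).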
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package crash - -import ( - "context" - "fmt" - "path" - - "k8s.io/api/batch/v1beta1" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/ceph/controller" - - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -const ( - crashCollectorKeyringUsername = "client.crash" - crashCollectorKeyName = "rook-ceph-crash-collector-keyring" - // pruneSchedule is scheduled to run every day at midnight. - pruneSchedule = "0 0 * * *" -) - -// createOrUpdateCephCrash is a wrapper around controllerutil.CreateOrUpdate -func (r *ReconcileNode) createOrUpdateCephCrash(node corev1.Node, tolerations []corev1.Toleration, cephCluster cephv1.CephCluster, cephVersion *cephver.CephVersion) (controllerutil.OperationResult, error) { - // Create or Update the deployment default/foo - nodeHostnameLabel, ok := node.ObjectMeta.Labels[corev1.LabelHostname] - if !ok { - return controllerutil.OperationResultNone, errors.Errorf("label key %q does not exist on node %q", corev1.LabelHostname, node.GetName()) - } - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: k8sutil.TruncateNodeName(fmt.Sprintf("%s-%%s", AppName), nodeHostnameLabel), - Namespace: cephCluster.GetNamespace(), - }, - } - err := controllerutil.SetControllerReference(&cephCluster, deploy, r.scheme) - if err != nil { - return controllerutil.OperationResultNone, errors.Errorf("failed to set owner reference of crashcollector deployment %q", deploy.Name) - } - - volumes := controller.DaemonVolumesBase(config.NewDatalessDaemonDataPathMap(cephCluster.GetNamespace(), cephCluster.Spec.DataDirHostPath), "") - volumes = append(volumes, keyring.Volume().CrashCollector()) - - mutateFunc := func() error { - - // labels for the pod, the deployment, and the deploymentSelector - deploymentLabels := map[string]string{ - corev1.LabelHostname: nodeHostnameLabel, - k8sutil.AppAttr: AppName, - NodeNameLabel: node.GetName(), - } - deploymentLabels[config.CrashType] = "crash" - deploymentLabels[controller.DaemonIDLabel] = "crash" - deploymentLabels[k8sutil.ClusterAttr] = cephCluster.GetNamespace() - - selectorLabels := map[string]string{ - corev1.LabelHostname: nodeHostnameLabel, - k8sutil.AppAttr: AppName, - NodeNameLabel: node.GetName(), - } - - nodeSelector := map[string]string{corev1.LabelHostname: nodeHostnameLabel} - - // Deployment selector is immutable so we set this value only if - // a new object is going to be created - if deploy.ObjectMeta.CreationTimestamp.IsZero() { - deploy.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: selectorLabels, - } - } - - deploy.ObjectMeta.Labels = deploymentLabels - k8sutil.AddRookVersionLabelToDeployment(deploy) - if cephVersion != nil { - 
controller.AddCephVersionLabelToDeployment(*cephVersion, deploy) - } - deploy.Spec.Template = corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: deploymentLabels, - }, - Spec: corev1.PodSpec{ - NodeSelector: nodeSelector, - InitContainers: []corev1.Container{ - getCrashDirInitContainer(cephCluster), - getCrashChownInitContainer(cephCluster), - }, - Containers: []corev1.Container{ - getCrashDaemonContainer(cephCluster, *cephVersion), - }, - Tolerations: tolerations, - RestartPolicy: corev1.RestartPolicyAlways, - HostNetwork: cephCluster.Spec.Network.IsHost(), - Volumes: volumes, - }, - } - - return nil - } - - return controllerutil.CreateOrUpdate(context.TODO(), r.client, deploy, mutateFunc) -} - -// createOrUpdateCephCron is a wrapper around controllerutil.CreateOrUpdate -func (r *ReconcileNode) createOrUpdateCephCron(cephCluster cephv1.CephCluster, cephVersion *cephver.CephVersion, useCronJobV1 bool) (controllerutil.OperationResult, error) { - objectMeta := metav1.ObjectMeta{ - Name: prunerName, - Namespace: cephCluster.GetNamespace(), - } - // Adding volumes to pods containing data needed to connect to the ceph cluster. - volumes := controller.DaemonVolumesBase(config.NewDatalessDaemonDataPathMap(cephCluster.GetNamespace(), cephCluster.Spec.DataDirHostPath), "") - volumes = append(volumes, keyring.Volume().CrashCollector()) - - // labels for the pod, the deployment, and the deploymentSelector - cronJobLabels := map[string]string{ - k8sutil.AppAttr: prunerName, - } - cronJobLabels[k8sutil.ClusterAttr] = cephCluster.GetNamespace() - - podTemplateSpec := corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: cronJobLabels, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - getCrashPruneContainer(cephCluster, *cephVersion), - }, - RestartPolicy: corev1.RestartPolicyNever, - HostNetwork: cephCluster.Spec.Network.IsHost(), - Volumes: volumes, - }, - } - - // After 100 failures, the cron job will no longer run. - // To avoid this, the cronjob is configured to only count the failures - // that occurred in the last hour. - deadline := int64(60) - - // minimum k8s version required for v1 cronJob is 'v1.21.0'. Apply v1 if k8s version is at least 'v1.21.0', else apply v1beta1 cronJob. 
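// A sketch of the version gate described in the comment above, assuming the
// Kubernetes server version string has already been obtained (for example via
// the discovery client). This is not the exact Rook helper, just the shape of
// the check: batch/v1 CronJob graduated in Kubernetes 1.21, so older servers
// only offer batch/v1beta1.
package example

import "k8s.io/apimachinery/pkg/util/version"

func useCronJobV1(serverVersion string) (bool, error) {
	v, err := version.ParseSemantic(serverVersion) // accepts e.g. "v1.22.2"
	if err != nil {
		return false, err
	}
	return v.AtLeast(version.MustParseSemantic("1.21.0")), nil
}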
- if useCronJobV1 { - // delete v1beta1 cronJob if it already exists - err := r.client.Delete(context.TODO(), &v1beta1.CronJob{ObjectMeta: objectMeta}) - if err != nil && !apierrors.IsNotFound(err) { - return controllerutil.OperationResultNone, errors.Wrapf(err, "failed to delete CronJob v1Beta1 %q", prunerName) - } - - cronJob := &v1.CronJob{ObjectMeta: objectMeta} - err = controllerutil.SetControllerReference(&cephCluster, cronJob, r.scheme) - if err != nil { - return controllerutil.OperationResultNone, errors.Errorf("failed to set owner reference of deployment %q", cronJob.Name) - } - mutateFunc := func() error { - cronJob.ObjectMeta.Labels = cronJobLabels - cronJob.Spec.JobTemplate.Spec.Template = podTemplateSpec - cronJob.Spec.Schedule = pruneSchedule - cronJob.Spec.StartingDeadlineSeconds = &deadline - - return nil - } - - return controllerutil.CreateOrUpdate(context.TODO(), r.client, cronJob, mutateFunc) - } - cronJob := &v1beta1.CronJob{ObjectMeta: objectMeta} - err := controllerutil.SetControllerReference(&cephCluster, cronJob, r.scheme) - if err != nil { - return controllerutil.OperationResultNone, errors.Errorf("failed to set owner reference of deployment %q", cronJob.Name) - } - - mutateFunc := func() error { - cronJob.ObjectMeta.Labels = cronJobLabels - cronJob.Spec.JobTemplate.Spec.Template = podTemplateSpec - cronJob.Spec.Schedule = pruneSchedule - cronJob.Spec.StartingDeadlineSeconds = &deadline - - return nil - } - - return controllerutil.CreateOrUpdate(context.TODO(), r.client, cronJob, mutateFunc) -} - -func getCrashDirInitContainer(cephCluster cephv1.CephCluster) corev1.Container { - dataPathMap := config.NewDatalessDaemonDataPathMap(cephCluster.GetNamespace(), cephCluster.Spec.DataDirHostPath) - crashPostedDir := path.Join(dataPathMap.ContainerCrashDir(), "posted") - - container := corev1.Container{ - Name: "make-container-crash-dir", - Command: []string{ - "mkdir", - "-p", - }, - Args: []string{ - crashPostedDir, - }, - Image: cephCluster.Spec.CephVersion.Image, - SecurityContext: controller.PodSecurityContext(), - Resources: cephv1.GetCrashCollectorResources(cephCluster.Spec.Resources), - VolumeMounts: controller.DaemonVolumeMounts(dataPathMap, ""), - } - return container -} - -func getCrashChownInitContainer(cephCluster cephv1.CephCluster) corev1.Container { - dataPathMap := config.NewDatalessDaemonDataPathMap(cephCluster.GetNamespace(), cephCluster.Spec.DataDirHostPath) - - return controller.ChownCephDataDirsInitContainer( - *dataPathMap, - cephCluster.Spec.CephVersion.Image, - controller.DaemonVolumeMounts(dataPathMap, ""), - cephv1.GetCrashCollectorResources(cephCluster.Spec.Resources), - controller.PodSecurityContext(), - ) -} - -func getCrashDaemonContainer(cephCluster cephv1.CephCluster, cephVersion cephver.CephVersion) corev1.Container { - cephImage := cephCluster.Spec.CephVersion.Image - dataPathMap := config.NewDatalessDaemonDataPathMap(cephCluster.GetNamespace(), cephCluster.Spec.DataDirHostPath) - crashEnvVar := generateCrashEnvVar() - envVars := append(controller.DaemonEnvVars(cephImage), crashEnvVar) - volumeMounts := controller.DaemonVolumeMounts(dataPathMap, "") - volumeMounts = append(volumeMounts, keyring.VolumeMount().CrashCollector()) - - container := corev1.Container{ - Name: "ceph-crash", - Command: []string{ - "ceph-crash", - }, - Image: cephImage, - Env: envVars, - VolumeMounts: volumeMounts, - Resources: cephv1.GetCrashCollectorResources(cephCluster.Spec.Resources), - SecurityContext: controller.PodSecurityContext(), - } - - return container 
-} - -func getCrashPruneContainer(cephCluster cephv1.CephCluster, cephVersion cephver.CephVersion) corev1.Container { - cephImage := cephCluster.Spec.CephVersion.Image - envVars := append(controller.DaemonEnvVars(cephImage), generateCrashEnvVar()) - dataPathMap := config.NewDatalessDaemonDataPathMap(cephCluster.GetNamespace(), cephCluster.Spec.DataDirHostPath) - volumeMounts := controller.DaemonVolumeMounts(dataPathMap, "") - volumeMounts = append(volumeMounts, keyring.VolumeMount().CrashCollector()) - - container := corev1.Container{ - Name: "ceph-crash-pruner", - Command: []string{ - "ceph", - "-n", - crashClient, - "crash", - "prune", - }, - Args: []string{ - fmt.Sprintf("%d", cephCluster.Spec.CrashCollector.DaysToRetain), - }, - Image: cephImage, - Env: envVars, - VolumeMounts: volumeMounts, - Resources: cephv1.GetCrashCollectorResources(cephCluster.Spec.Resources), - SecurityContext: controller.PodSecurityContext(), - } - - return container -} - -func generateCrashEnvVar() corev1.EnvVar { - val := fmt.Sprintf("-m $(ROOK_CEPH_MON_HOST) -k %s", keyring.VolumeMount().CrashCollectorKeyringFilePath()) - env := corev1.EnvVar{Name: "CEPH_ARGS", Value: val} - - return env -} diff --git a/pkg/operator/ceph/cluster/crash/crash_test.go b/pkg/operator/ceph/cluster/crash/crash_test.go deleted file mode 100644 index fa4d9786e..000000000 --- a/pkg/operator/ceph/cluster/crash/crash_test.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package crash - -import ( - "context" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - "github.com/tevino/abool" - v1 "k8s.io/api/batch/v1" - "k8s.io/api/batch/v1beta1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - cntrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func TestGenerateCrashEnvVar(t *testing.T) { - env := generateCrashEnvVar() - assert.Equal(t, "CEPH_ARGS", env.Name) - assert.Equal(t, "-m $(ROOK_CEPH_MON_HOST) -k /etc/ceph/crash-collector-keyring-store/keyring", env.Value) -} - -func TestCreateOrUpdateCephCron(t *testing.T) { - cephCluster := cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Namespace: "rook-ceph"}} - cephVersion := &cephver.CephVersion{Major: 16, Minor: 2, Extra: 0} - ctx := context.TODO() - context := &clusterd.Context{ - Clientset: test.New(t, 1), - RookClientset: rookclient.NewSimpleClientset(), - RequestCancelOrchestration: abool.New(), - } - - s := scheme.Scheme - err := v1.AddToScheme(s) - if err != nil { - assert.Fail(t, "failed to build scheme") - } - err = v1beta1.AddToScheme(s) - if err != nil { - assert.Fail(t, "failed to build scheme") - } - - r := &ReconcileNode{ - scheme: s, - client: fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects().Build(), - context: context, - } - - cronV1 := &v1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: prunerName, - Namespace: "rook-ceph", - }, - } - - cronV1Beta1 := &v1beta1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: prunerName, - Namespace: "rook-ceph", - }, - } - - // check if v1beta1 cronJob is present and v1 cronJob is not - controllerutil, err := r.createOrUpdateCephCron(cephCluster, cephVersion, false) - assert.NoError(t, err) - assert.Equal(t, controllerutil, cntrlutil.OperationResult("created")) - - err = r.client.Get(ctx, types.NamespacedName{Namespace: "rook-ceph", Name: prunerName}, cronV1Beta1) - assert.NoError(t, err) - - err = r.client.Get(ctx, types.NamespacedName{Namespace: "rook-ceph", Name: prunerName}, cronV1) - assert.Error(t, err) - assert.True(t, kerrors.IsNotFound(err)) - - // check if v1 cronJob is present and v1beta1 cronJob is not - controllerutil, err = r.createOrUpdateCephCron(cephCluster, cephVersion, true) - assert.NoError(t, err) - assert.Equal(t, controllerutil, cntrlutil.OperationResult("created")) - - err = r.client.Get(ctx, types.NamespacedName{Namespace: "rook-ceph", Name: prunerName}, cronV1) - assert.NoError(t, err) - - err = r.client.Get(ctx, types.NamespacedName{Namespace: "rook-ceph", Name: prunerName}, cronV1Beta1) - assert.Error(t, err) - assert.True(t, kerrors.IsNotFound(err)) -} diff --git a/pkg/operator/ceph/cluster/crash/keyring.go b/pkg/operator/ceph/cluster/crash/keyring.go deleted file mode 100644 index a6400f8b0..000000000 --- a/pkg/operator/ceph/cluster/crash/keyring.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
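// A generic sketch of the controllerutil.CreateOrUpdate pattern that
// createOrUpdateCephCrash and createOrUpdateCephCron (exercised by the test
// above) are built on: the mutate callback sets only the desired fields on a
// possibly pre-existing object, and the returned OperationResult reports
// whether it was created, updated, or left unchanged. The ConfigMap here is a
// placeholder object, not one Rook manages.
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func ensureConfigMap(ctx context.Context, c client.Client, namespace string) (controllerutil.OperationResult, error) {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example-settings", Namespace: namespace},
	}
	return controllerutil.CreateOrUpdate(ctx, c, cm, func() error {
		// CreateOrUpdate fetches the current object first, so anything not
		// touched here is preserved on update.
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["key"] = "value"
		return nil
	})
}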
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package crash - -import ( - "fmt" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/k8sutil" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - crashClient = `client.crash` - crashKeyringTemplate = ` -[client.crash] - key = %s - caps mon = "allow profile crash" - caps mgr = "allow rw" -` -) - -// CreateCrashCollectorSecret creates the Kubernetes Crash Collector Secret -func CreateCrashCollectorSecret(context *clusterd.Context, clusterInfo *client.ClusterInfo) error { - k := keyring.GetSecretStore(context, clusterInfo, clusterInfo.OwnerInfo) - - // Create CrashCollector Ceph key - crashCollectorSecretKey, err := createCrashCollectorKeyring(k) - if err != nil { - return errors.Wrapf(err, "failed to create %q ceph keyring", crashCollectorKeyringUsername) - } - - // Create or update Kubernetes CSI secret - if err := createOrUpdateCrashCollectorSecret(clusterInfo, crashCollectorSecretKey, k); err != nil { - return errors.Wrap(err, "failed to create kubernetes csi secret") - } - - return nil -} - -func cephCrashCollectorKeyringCaps() []string { - return []string{ - "mon", "allow profile crash", - "mgr", "allow rw", - } -} - -func createCrashCollectorKeyring(s *keyring.SecretStore) (string, error) { - key, err := s.GenerateKey(crashCollectorKeyringUsername, cephCrashCollectorKeyringCaps()) - if err != nil { - return "", err - } - - return key, nil -} - -func createOrUpdateCrashCollectorSecret(clusterInfo *client.ClusterInfo, crashCollectorSecretKey string, k *keyring.SecretStore) error { - - keyring := fmt.Sprintf(crashKeyringTemplate, crashCollectorSecretKey) - - crashCollectorSecret := map[string][]byte{ - "keyring": []byte(keyring), - } - - s := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: crashCollectorKeyName, - Namespace: clusterInfo.Namespace, - }, - Data: crashCollectorSecret, - Type: k8sutil.RookType, - } - err := clusterInfo.OwnerInfo.SetControllerReference(s) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to crash controller secret %q", s.Name) - } - - // Create Kubernetes Secret - err = k.CreateSecret(s) - if err != nil { - return errors.Wrapf(err, "failed to create kubernetes secret %q for cluster %q", crashCollectorSecret, clusterInfo.Namespace) - } - - logger.Infof("created kubernetes crash collector secret for cluster %q", clusterInfo.Namespace) - return nil -} diff --git a/pkg/operator/ceph/cluster/crash/keyring_test.go b/pkg/operator/ceph/cluster/crash/keyring_test.go deleted file mode 100644 index ea8f9f868..000000000 --- a/pkg/operator/ceph/cluster/crash/keyring_test.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
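// For illustration: rendering the crash-collector keyring template shown above
// with a placeholder key gives the keyring file that ends up in the
// rook-ceph-crash-collector-keyring secret. The key value below is fake.
package main

import "fmt"

const crashKeyringTemplate = `
[client.crash]
	key = %s
	caps mon = "allow profile crash"
	caps mgr = "allow rw"
`

func main() {
	// Prints the [client.crash] section with the key and the mon/mgr caps filled in.
	fmt.Printf(crashKeyringTemplate, "AQBexampleonlynotarealcephkey==")
}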
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package crash - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCephCrashCollectorKeyringCaps(t *testing.T) { - caps := cephCrashCollectorKeyringCaps() - assert.Equal(t, caps, []string{"mon", "allow profile crash", "mgr", "allow rw"}) -} diff --git a/pkg/operator/ceph/cluster/crash/reconcile.go b/pkg/operator/ceph/cluster/crash/reconcile.go deleted file mode 100644 index f18bdfb7d..000000000 --- a/pkg/operator/ceph/cluster/crash/reconcile.go +++ /dev/null @@ -1,329 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package crash - -import ( - "context" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/cluster/rbd" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/batch/v1" - "k8s.io/api/batch/v1beta1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/version" - - "github.com/rook/rook/pkg/operator/ceph/file/mds" - "github.com/rook/rook/pkg/operator/ceph/file/mirror" - "github.com/rook/rook/pkg/operator/ceph/object" - - "github.com/coreos/pkg/capnslog" - - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - "github.com/rook/rook/pkg/operator/k8sutil" - - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - // Implement reconcile.Reconciler so the controller can reconcile objects - _ reconcile.Reconciler = &ReconcileNode{} - - // wait for secret "rook-ceph-crash-collector-keyring" to be created - waitForRequeueIfSecretNotCreated = reconcile.Result{Requeue: true, RequeueAfter: 30 * time.Second} -) - -const ( - MinVersionForCronV1 = "1.21.0" -) - -// ReconcileNode reconciles ReplicaSets -type ReconcileNode struct { - // client can be used to retrieve objects from the APIServer. - scheme *runtime.Scheme - client client.Client - context *clusterd.Context -} - -// Reconcile reconciles a node and ensures that it has a crashcollector deployment -// attached to it. 
-// The Controller will requeue the Request to be processed again if an error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileNode) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - result, err := r.reconcile(request) - if err != nil { - logger.Error(err) - } - return result, err -} - -func (r *ReconcileNode) reconcile(request reconcile.Request) (reconcile.Result, error) { - logger.Debugf("reconciling node: %q", request.Name) - - // get the node object - node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: request.Name}} - err := r.client.Get(context.TODO(), request.NamespacedName, node) - if err != nil { - if kerrors.IsNotFound(err) { - // if a node is not present, check if there are any crashcollector deployment for that node and delete it. - err := r.listCrashCollectorAndDelete(request.Name, request.Namespace) - if err != nil { - logger.Errorf("failed to list and delete crash collector deployment on node %q; user should delete them manually. %v", request.Name, err) - } - } else { - return reconcile.Result{}, errors.Wrapf(err, "could not get node %q", request.Name) - } - } - - // Get the list of all the Ceph pods - cephPods, err := r.cephPodList() - if err != nil { - if len(cephPods) == 0 { - return reconcile.Result{}, nil - } - return reconcile.Result{}, errors.Wrap(err, "failed to list all ceph pods") - } - - namespaceToPodList := make(map[string][]corev1.Pod) - for _, cephPod := range cephPods { - podNamespace := cephPod.GetNamespace() - podList, ok := namespaceToPodList[podNamespace] - if !ok { - // initialize list - namespaceToPodList[podNamespace] = []corev1.Pod{cephPod} - } else { - // append cephPod to namespace's pod list - namespaceToPodList[podNamespace] = append(podList, cephPod) - } - } - - for namespace, cephPods := range namespaceToPodList { - // get dataDirHostPath from the CephCluster - cephClusters := &cephv1.CephClusterList{} - err := r.client.List(context.TODO(), cephClusters, client.InNamespace(namespace)) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "could not get cephcluster in namespaces %q", namespace) - } - if len(cephClusters.Items) < 1 { - logger.Debugf("no CephCluster found in the namespace %q", namespace) - return reconcile.Result{}, nil - } - - cephCluster := cephClusters.Items[0] - if len(cephClusters.Items) > 1 { - logger.Errorf("more than one CephCluster found in the namespace %q, choosing the first one %q", namespace, cephCluster.GetName()) - } - - // If the crash controller is disabled in the spec let's do a noop - if cephCluster.Spec.CrashCollector.Disable { - deploymentList := &appsv1.DeploymentList{} - namespaceListOpts := client.InNamespace(request.Namespace) - - // Try to fetch the list of existing deployment and remove them - err := r.client.List(context.TODO(), deploymentList, client.MatchingLabels{k8sutil.AppAttr: AppName}, namespaceListOpts) - if err != nil { - logger.Errorf("failed to list crash collector deployments, delete it/them manually. %v", err) - return reconcile.Result{}, nil - } - - // Try to delete all the crash deployments - for _, d := range deploymentList.Items { - err := r.deleteCrashCollector(d) - if err != nil { - logger.Errorf("failed to delete crash collector deployment %q, delete it manually. 
%v", d.Name, err) - continue - } - logger.Infof("crash collector deployment %q successfully removed", d.Name) - } - - return reconcile.Result{}, nil - } - - // checking if secret "rook-ceph-crash-collector-keyring" is present which is required to create crashcollector pods - secret := &corev1.Secret{} - err = r.client.Get(context.TODO(), types.NamespacedName{Name: crashCollectorKeyName, Namespace: namespace}, secret) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debugf("secret %q not found. retrying in %q. %v", crashCollectorKeyName, waitForRequeueIfSecretNotCreated.RequeueAfter.String(), err) - return waitForRequeueIfSecretNotCreated, nil - } - - return reconcile.Result{}, errors.Wrapf(err, "failed to list the %q secret.", crashCollectorKeyName) - } - - clusterImage := cephCluster.Spec.CephVersion.Image - cephVersion, err := opcontroller.GetImageVersion(cephCluster) - if err != nil { - logger.Errorf("ceph version not found for image %q used by cluster %q. %v", clusterImage, cephCluster.Name, err) - return reconcile.Result{}, nil - } - - uniqueTolerations := controllerconfig.TolerationSet{} - hasCephPods := false - for _, cephPod := range cephPods { - if cephPod.Spec.NodeName == request.Name { - hasCephPods = true - for _, podToleration := range cephPod.Spec.Tolerations { - // Add toleration to the map - uniqueTolerations.Add(podToleration) - } - } - } - - // If the node has Ceph pods we create a crash collector - if hasCephPods { - tolerations := uniqueTolerations.ToList() - op, err := r.createOrUpdateCephCrash(*node, tolerations, cephCluster, cephVersion) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "node reconcile failed on op %q", op) - } - logger.Debugf("deployment successfully reconciled for node %q. operation: %q", request.Name, op) - // If there are no Ceph pods, check that there are no crash collector pods in case Ceph pods moved to another node - // Thus the crash collector must be removed from that node - } else { - err := r.listCrashCollectorAndDelete(request.Name, request.Namespace) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to list and delete crash collector deployments on node %q", request.Name) - } - } - - if err := r.reconcileCrashRetention(namespace, cephCluster, cephVersion); err != nil { - return reconcile.Result{}, err - } - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileNode) cephPodList() ([]corev1.Pod, error) { - cephPods := make([]corev1.Pod, 0) - cephAppNames := []string{mon.AppName, mgr.AppName, osd.AppName, object.AppName, mds.AppName, rbd.AppName, mirror.AppName} - - for _, app := range cephAppNames { - podList := &corev1.PodList{} - err := r.client.List(context.TODO(), podList, client.MatchingLabels{k8sutil.AppAttr: app}) - if err != nil { - return cephPods, errors.Wrapf(err, "could not list the %q pods", app) - } - - cephPods = append(cephPods, podList.Items...) 
- } - - return cephPods, nil -} - -func (r *ReconcileNode) listCrashCollectorAndDelete(nodeName, ns string) error { - deploymentList := &appsv1.DeploymentList{} - namespaceListOpts := client.InNamespace(ns) - err := r.client.List(context.TODO(), deploymentList, client.MatchingLabels{k8sutil.AppAttr: AppName, NodeNameLabel: nodeName}, namespaceListOpts) - if err != nil { - return errors.Wrap(err, "failed to list crash collector deployments") - } - for _, d := range deploymentList.Items { - logger.Infof("deleting deployment %q for node %q", d.ObjectMeta.Name, nodeName) - err := r.deleteCrashCollector(d) - if err != nil { - return errors.Wrapf(err, "failed to delete crash collector deployment %q", d.Name) - } - logger.Infof("successfully removed crash collector deployment %q from node %q", d.Name, nodeName) - } - - return nil -} - -func (r *ReconcileNode) deleteCrashCollector(deployment appsv1.Deployment) error { - deploymentName := deployment.ObjectMeta.Name - namespace := deployment.ObjectMeta.Namespace - dep := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: namespace, - }, - } - - err := r.client.Delete(context.TODO(), dep) - if err != nil && !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "could not delete crash collector deployment %q", deploymentName) - } - - return nil -} - -func (r *ReconcileNode) reconcileCrashRetention(namespace string, cephCluster cephv1.CephCluster, cephVersion *cephver.CephVersion) error { - k8sVersion, err := k8sutil.GetK8SVersion(r.context.Clientset) - if err != nil { - return errors.Wrap(err, "failed to get k8s version") - } - useCronJobV1 := k8sVersion.AtLeast(version.MustParseSemantic(MinVersionForCronV1)) - - objectMeta := metav1.ObjectMeta{ - Name: prunerName, - Namespace: namespace, - } - - if cephCluster.Spec.CrashCollector.DaysToRetain == 0 { - logger.Debug("deleting cronjob if it exists...") - - var cronJob client.Object - // minimum k8s version required for v1 cronJob is 'v1.21.0'. Apply v1 if k8s version is at least 'v1.21.0', else apply v1beta1 cronJob. - if useCronJobV1 { - // delete v1beta1 cronJob if it already exists - err = r.client.Delete(context.TODO(), &v1beta1.CronJob{ObjectMeta: objectMeta}) - if err != nil && !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to delete CronJob v1beta1 %q", prunerName) - } - cronJob = &v1.CronJob{ObjectMeta: objectMeta} - } else { - cronJob = &v1beta1.CronJob{ObjectMeta: objectMeta} - } - - err := r.client.Delete(context.TODO(), cronJob) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("cronJob resource not found. Ignoring since object must be deleted.") - } else { - return err - } - } else { - logger.Debug("successfully deleted crash pruner cronjob.") - } - } else { - logger.Debugf("daysToRetain set to: %d", cephCluster.Spec.CrashCollector.DaysToRetain) - op, err := r.createOrUpdateCephCron(cephCluster, cephVersion, useCronJobV1) - if err != nil { - return errors.Wrapf(err, "node reconcile failed on op %q", op) - } - logger.Debugf("cronjob successfully reconciled. operation: %q", op) - } - return nil -} diff --git a/pkg/operator/ceph/cluster/dependents.go b/pkg/operator/ceph/cluster/dependents.go deleted file mode 100644 index c5e18ad2a..000000000 --- a/pkg/operator/ceph/cluster/dependents.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "context" - "strings" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/util" - "github.com/rook/rook/pkg/util/dependents" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - // must use plural kinds - cephClusterDependentPluralKinds []string = []string{ - "CephBlockPools", - "CephRBDMirrors", - "CephFilesystems", - "CephFilesystemMirrors", - "CephObjectStores", - "CephObjectStoreUsers", - "CephObjectZones", - "CephObjectZoneGroups", - "CephObjectRealms", - "CephNFSes", - "CephClients", - } -) - -// CephClusterDependents returns a DependentList of dependents of a CephCluster in the namespace. -func CephClusterDependents(c *clusterd.Context, namespace string) (*dependents.DependentList, error) { - ctx := context.TODO() - - dependents := dependents.NewDependentList() - errs := []error{} - - for _, pluralKind := range cephClusterDependentPluralKinds { - resource := pluralKindToResource(pluralKind) - gvr := cephv1.SchemeGroupVersion.WithResource(resource) - list, err := c.DynamicClientset.Resource(gvr).Namespace(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - errs = append(errs, errors.Wrapf(err, "failed to list %s", pluralKind)) - continue - } - if len(list.Items) > 0 { - for _, obj := range list.Items { - dependents.Add(pluralKind, obj.GetName()) - } - } - } - // returns a nil error if there are no errors in the list - outErr := util.AggregateErrors(errs, "failed to list some dependents for CephCluster in namespace %q", namespace) - - return dependents, outErr -} - -func pluralKindToResource(pluralKind string) string { - // The dynamic client wants resources which are lower-case versions of the plural Kinds for - // Kubernetes CRDs in almost all cases. - return strings.ToLower(pluralKind) -} diff --git a/pkg/operator/ceph/cluster/dependents_test.go b/pkg/operator/ceph/cluster/dependents_test.go deleted file mode 100644 index fc648a2e1..000000000 --- a/pkg/operator/ceph/cluster/dependents_test.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
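The removed CephClusterDependents resolves each plural kind to a resource name and lists it through the dynamic client. A stripped-down sketch of that single-kind lookup, with the GroupVersionResource written out explicitly (assumed to match cephv1.SchemeGroupVersion), could be:

// Minimal sketch of the dependent-listing idea: list one Ceph CRD kind with
// the dynamic client and collect the object names. The group/version strings
// are the only Rook-specific assumption.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

func listDependentNames(ctx context.Context, dyn dynamic.Interface, namespace, resource string) ([]string, error) {
	// e.g. resource = "cephblockpools" (the lower-cased plural kind)
	gvr := schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: resource}
	list, err := dyn.Resource(gvr).Namespace(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for _, obj := range list.Items {
		names = append(names, obj.GetName())
	}
	return names, nil
}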
-*/ - -package cluster - -import ( - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/stretchr/testify/assert" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - dynamicfake "k8s.io/client-go/dynamic/fake" - k8stesting "k8s.io/client-go/testing" -) - -func TestCephClusterDependents(t *testing.T) { - scheme := runtime.NewScheme() - assert.NoError(t, cephv1.AddToScheme(scheme)) - - ns := "test-ceph-cluster-dependents" - - var c *clusterd.Context - - newClusterdCtx := func(objects ...runtime.Object) *clusterd.Context { - dynInt := dynamicfake.NewSimpleDynamicClient(scheme, objects...) - return &clusterd.Context{ - DynamicClientset: dynInt, - } - } - - // Create objectmeta with the given name in our test namespace - meta := func(name string) v1.ObjectMeta { - return v1.ObjectMeta{ - Name: name, - Namespace: ns, - } - } - - t.Run("CephBlockPools", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephBlockPool{ObjectMeta: meta("block-pool-1")}, - &cephv1.CephBlockPool{ObjectMeta: meta("block-pool-2")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephBlockPools"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"block-pool-1", "block-pool-2"}, deps.OfPluralKind("CephBlockPools")) - }) - - t.Run("CephRBDMirrors", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephRBDMirror{ObjectMeta: meta("rbdmirror")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephRBDMirrors"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"rbdmirror"}, deps.OfPluralKind("CephRBDMirrors")) - }) - - t.Run("CephFilesystems", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephFilesystem{ObjectMeta: meta("filesystem-1")}, - &cephv1.CephFilesystem{ObjectMeta: meta("filesystem-2")}, - &cephv1.CephFilesystem{ObjectMeta: meta("filesystem-3")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephFilesystems"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"filesystem-1", "filesystem-2", "filesystem-3"}, deps.OfPluralKind("CephFilesystems")) - }) - - t.Run("CephFilesystemMirrors", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephFilesystemMirror{ObjectMeta: meta("fsmirror")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephFilesystemMirrors"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"fsmirror"}, deps.OfPluralKind("CephFilesystemMirrors")) - }) - - t.Run("CephObjectStores", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephObjectStore{ObjectMeta: meta("objectstore-1")}, - &cephv1.CephObjectStore{ObjectMeta: meta("objectstore-2")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephObjectStores"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"objectstore-1", "objectstore-2"}, deps.OfPluralKind("CephObjectStores")) - }) - - t.Run("CephObjectStoreUsers", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephObjectStoreUser{ObjectMeta: meta("u1")}, - &cephv1.CephObjectStoreUser{ObjectMeta: meta("u2")}, - &cephv1.CephObjectStoreUser{ObjectMeta: meta("u3")}, - 
&cephv1.CephObjectStoreUser{ObjectMeta: meta("u4")}, - &cephv1.CephObjectStoreUser{ObjectMeta: meta("u5")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephObjectStoreUsers"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"u1", "u2", "u3", "u4", "u5"}, deps.OfPluralKind("CephObjectStoreUsers")) - }) - - t.Run("CephObjectZones", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephObjectZone{ObjectMeta: meta("zone-1")}, - &cephv1.CephObjectZone{ObjectMeta: meta("zone-2")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephObjectZones"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"zone-1", "zone-2"}, deps.OfPluralKind("CephObjectZones")) - }) - - t.Run("CephObjectZoneGroups", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephObjectZoneGroup{ObjectMeta: meta("group-1")}, - &cephv1.CephObjectZoneGroup{ObjectMeta: meta("group-2")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephObjectZoneGroups"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"group-1", "group-2"}, deps.OfPluralKind("CephObjectZoneGroups")) - }) - - t.Run("CephObjectRealms", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephObjectRealm{ObjectMeta: meta("realm-1")}, - &cephv1.CephObjectRealm{ObjectMeta: meta("realm-2")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephObjectRealms"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"realm-1", "realm-2"}, deps.OfPluralKind("CephObjectRealms")) - }) - - t.Run("CephNFSes", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephNFS{ObjectMeta: meta("nfs-1")}, - &cephv1.CephNFS{ObjectMeta: meta("nfs-2")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephNFSes"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"nfs-1", "nfs-2"}, deps.OfPluralKind("CephNFSes")) - }) - - t.Run("CephClients", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephClient{ObjectMeta: meta("client-1")}, - &cephv1.CephClient{ObjectMeta: meta("client-2")}, - &cephv1.CephClient{ObjectMeta: meta("client-3")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephClients"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"client-1", "client-2", "client-3"}, deps.OfPluralKind("CephClients")) - }) - - t.Run("All", func(t *testing.T) { - c = newClusterdCtx( - &cephv1.CephBlockPool{ObjectMeta: meta("pool-1")}, - &cephv1.CephRBDMirror{ObjectMeta: meta("rbdmirror-1")}, - &cephv1.CephRBDMirror{ObjectMeta: meta("rbdmirror-2")}, - &cephv1.CephFilesystem{ObjectMeta: meta("filesystem-1")}, - &cephv1.CephFilesystemMirror{ObjectMeta: meta("fsmirror-1")}, - &cephv1.CephFilesystemMirror{ObjectMeta: meta("fsmirror-2")}, - &cephv1.CephObjectStore{ObjectMeta: meta("objectstore-1")}, - &cephv1.CephObjectStoreUser{ObjectMeta: meta("u1")}, - &cephv1.CephObjectZone{ObjectMeta: meta("zone-1")}, - &cephv1.CephObjectZoneGroup{ObjectMeta: meta("group-1")}, - &cephv1.CephObjectRealm{ObjectMeta: meta("realm-1")}, - &cephv1.CephNFS{ObjectMeta: meta("nfs-1")}, - &cephv1.CephClient{ObjectMeta: 
meta("client-1")}, - ) - deps, err := CephClusterDependents(c, ns) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephBlockPools", "CephRBDMirrors", "CephFilesystems", - "CephFilesystemMirrors", "CephObjectStores", "CephObjectStoreUsers", "CephObjectZones", - "CephObjectZoneGroups", "CephObjectRealms", "CephNFSes", "CephClients"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"pool-1"}, deps.OfPluralKind("CephBlockPools")) - assert.ElementsMatch(t, []string{"rbdmirror-1", "rbdmirror-2"}, deps.OfPluralKind("CephRBDMirrors")) - assert.ElementsMatch(t, []string{"filesystem-1"}, deps.OfPluralKind("CephFilesystems")) - assert.ElementsMatch(t, []string{"fsmirror-1", "fsmirror-2"}, deps.OfPluralKind("CephFilesystemMirrors")) - assert.ElementsMatch(t, []string{"objectstore-1"}, deps.OfPluralKind("CephObjectStores")) - assert.ElementsMatch(t, []string{"u1"}, deps.OfPluralKind("CephObjectStoreUsers")) - assert.ElementsMatch(t, []string{"zone-1"}, deps.OfPluralKind("CephObjectZones")) - assert.ElementsMatch(t, []string{"group-1"}, deps.OfPluralKind("CephObjectZoneGroups")) - assert.ElementsMatch(t, []string{"realm-1"}, deps.OfPluralKind("CephObjectRealms")) - assert.ElementsMatch(t, []string{"nfs-1"}, deps.OfPluralKind("CephNFSes")) - assert.ElementsMatch(t, []string{"client-1"}, deps.OfPluralKind("CephClients")) - - t.Run("and no dependencies in another namespace", func(t *testing.T) { - deps, err := CephClusterDependents(c, "other-namespace") - assert.NoError(t, err) - assert.True(t, deps.Empty()) - }) - }) - - t.Run("With errors", func(t *testing.T) { - dynInt := dynamicfake.NewSimpleDynamicClient(scheme, - &cephv1.CephBlockPool{ObjectMeta: meta("pool-1")}, - &cephv1.CephFilesystem{ObjectMeta: meta("filesystem-1")}, - &cephv1.CephObjectStore{ObjectMeta: meta("objectstore-1")}, - ) - // add reactor to cause failures when listing block and nfs (but not object, fs, or any others) - var listReactor k8stesting.ReactionFunc = func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { - r := action.GetResource().Resource - if r == "cephblockpools" || r == "cephnfses" { - return true, nil, errors.Errorf("fake error listing %q", r) - } - return false, nil, nil - } - dynInt.PrependReactor("list", "*", listReactor) - c := &clusterd.Context{ - DynamicClientset: dynInt, - } - deps, err := CephClusterDependents(c, ns) - assert.Error(t, err) - assert.Contains(t, err.Error(), "CephBlockPools") - assert.Contains(t, err.Error(), "CephNFSes") - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"CephFilesystems", "CephObjectStores"}, deps.PluralKinds()) - assert.ElementsMatch(t, []string{"filesystem-1"}, deps.OfPluralKind("CephFilesystems")) - assert.ElementsMatch(t, []string{"objectstore-1"}, deps.OfPluralKind("CephObjectStores")) - }) -} diff --git a/pkg/operator/ceph/cluster/mgr/config.go b/pkg/operator/ceph/cluster/mgr/config.go deleted file mode 100644 index ae69cf77c..000000000 --- a/pkg/operator/ceph/cluster/mgr/config.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mgr - -import ( - "context" - "fmt" - - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - keyringTemplate = ` -[mgr.%s] - key = %s - caps mon = "allow profile mgr" - caps mds = "allow *" - caps osd = "allow *" -` -) - -// mgrConfig for a single mgr -type mgrConfig struct { - ResourceName string // the name rook gives to mgr resources in k8s metadata - DaemonID string // the ID of the Ceph daemon ("a", "b", ...) - DataPathMap *config.DataPathMap // location to store data in container -} - -func (c *Cluster) dashboardPort() int { - if c.spec.Dashboard.Port == 0 { - // default port for HTTP/HTTPS - if c.spec.Dashboard.SSL { - return dashboardPortHTTPS - } else { - return dashboardPortHTTP - } - } - // crd validates port >= 0 - return c.spec.Dashboard.Port -} - -func (c *Cluster) generateKeyring(m *mgrConfig) (string, error) { - ctx := context.TODO() - user := fmt.Sprintf("mgr.%s", m.DaemonID) - access := []string{"mon", "allow profile mgr", "mds", "allow *", "osd", "allow *"} - s := keyring.GetSecretStore(c.context, c.clusterInfo, c.clusterInfo.OwnerInfo) - - key, err := s.GenerateKey(user, access) - if err != nil { - return "", err - } - - // Delete legacy key store for upgrade from Rook v0.9.x to v1.0.x - err = c.context.Clientset.CoreV1().Secrets(c.clusterInfo.Namespace).Delete(ctx, m.ResourceName, metav1.DeleteOptions{}) - if err != nil { - if errors.IsNotFound(err) { - logger.Debugf("legacy mgr key %q is already removed", m.ResourceName) - } else { - logger.Warningf("legacy mgr key %q could not be removed. %v", m.ResourceName, err) - } - } - - keyring := fmt.Sprintf(keyringTemplate, m.DaemonID, key) - return keyring, s.CreateOrUpdate(m.ResourceName, keyring) -} diff --git a/pkg/operator/ceph/cluster/mgr/dashboard.go b/pkg/operator/ceph/cluster/mgr/dashboard.go deleted file mode 100644 index b8f66f02f..000000000 --- a/pkg/operator/ceph/cluster/mgr/dashboard.go +++ /dev/null @@ -1,350 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mgr for the Ceph manager. 
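generateKeyring above pairs a generated key with a fixed caps template. A minimal illustration of how that template is rendered for one mgr daemon (the key value is a placeholder):

// Illustrative only: rendering the mgr keyring section once a key exists.
package example

import "fmt"

const mgrKeyringTemplate = `
[mgr.%s]
    key = %s
    caps mon = "allow profile mgr"
    caps mds = "allow *"
    caps osd = "allow *"
`

func renderMgrKeyring(daemonID, key string) string {
	// For daemonID "a" this yields a [mgr.a] section whose caps mirror the
	// access list {"mon", "allow profile mgr", "mds", "allow *", "osd", "allow *"}
	// passed to SecretStore.GenerateKey in the removed code.
	return fmt.Sprintf(mgrKeyringTemplate, daemonID, key)
}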
-package mgr - -import ( - "context" - "crypto/rand" - "fmt" - "io/ioutil" - "os" - "strconv" - "syscall" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - dashboardModuleName = "dashboard" - dashboardPortHTTPS = 8443 - dashboardPortHTTP = 7000 - dashboardUsername = "admin" - // #nosec because of the word `Password` - dashboardPasswordName = "rook-ceph-dashboard-password" - passwordLength = 20 - passwordKeyName = "password" - certAlreadyConfiguredErrorCode = 5 - invalidArgErrorCode = int(syscall.EINVAL) -) - -var ( - dashboardInitWaitTime = 5 * time.Second -) - -func (c *Cluster) configureDashboardService(activeDaemon string) error { - ctx := context.TODO() - dashboardService, err := c.makeDashboardService(AppName, activeDaemon) - if err != nil { - return err - } - if c.spec.Dashboard.Enabled { - // expose the dashboard service - if _, err := k8sutil.CreateOrUpdateService(c.context.Clientset, c.clusterInfo.Namespace, dashboardService); err != nil { - return errors.Wrap(err, "failed to configure dashboard svc") - } - } else { - // delete the dashboard service if it exists - err := c.context.Clientset.CoreV1().Services(c.clusterInfo.Namespace).Delete(ctx, dashboardService.Name, metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return errors.Wrap(err, "failed to delete dashboard service") - } - } - - return nil -} - -// Ceph docs about the dashboard module: http://docs.ceph.com/docs/nautilus/mgr/dashboard/ -func (c *Cluster) configureDashboardModules() error { - if c.spec.Dashboard.Enabled { - if err := client.MgrEnableModule(c.context, c.clusterInfo, dashboardModuleName, true); err != nil { - return errors.Wrap(err, "failed to enable mgr dashboard module") - } - } else { - if err := client.MgrDisableModule(c.context, c.clusterInfo, dashboardModuleName); err != nil { - logger.Errorf("failed to disable mgr dashboard module. %v", err) - } - return nil - } - - hasChanged, err := c.initializeSecureDashboard() - if err != nil { - return errors.Wrap(err, "failed to initialize dashboard") - } - - for _, daemonID := range c.getDaemonIDs() { - changed, err := c.configureDashboardModuleSettings(daemonID) - if err != nil { - return err - } - if changed { - hasChanged = true - } - } - if hasChanged { - logger.Info("dashboard config has changed. 
restarting the dashboard module") - return c.restartDashboard() - } - return nil -} - -func (c *Cluster) configureDashboardModuleSettings(daemonID string) (bool, error) { - monStore := config.GetMonStore(c.context, c.clusterInfo) - - daemonID = fmt.Sprintf("mgr.%s", daemonID) - - // url prefix - hasChanged, err := monStore.SetIfChanged(daemonID, "mgr/dashboard/url_prefix", c.spec.Dashboard.URLPrefix) - if err != nil { - return false, err - } - - // ssl support - ssl := strconv.FormatBool(c.spec.Dashboard.SSL) - changed, err := monStore.SetIfChanged(daemonID, "mgr/dashboard/ssl", ssl) - if err != nil { - return false, err - } - hasChanged = hasChanged || changed - - // server port - port := strconv.Itoa(c.dashboardPort()) - changed, err = monStore.SetIfChanged(daemonID, "mgr/dashboard/server_port", port) - if err != nil { - return false, err - } - hasChanged = hasChanged || changed - - // SSL enabled. Needed to set specifically the ssl port setting - if c.spec.Dashboard.SSL { - changed, err = monStore.SetIfChanged(daemonID, "mgr/dashboard/ssl_server_port", port) - if err != nil { - return false, err - } - hasChanged = hasChanged || changed - } - - return hasChanged, nil -} - -func (c *Cluster) initializeSecureDashboard() (bool, error) { - // we need to wait a short period after enabling the module before we can call the `ceph dashboard` commands. - time.Sleep(dashboardInitWaitTime) - - password, err := c.getOrGenerateDashboardPassword() - if err != nil { - return false, errors.Wrap(err, "failed to generate a password for the ceph dashboard") - } - - if c.spec.Dashboard.SSL { - alreadyCreated, err := c.createSelfSignedCert() - if err != nil { - return false, errors.Wrap(err, "failed to create a self signed cert for the ceph dashboard") - } - if alreadyCreated { - return false, nil - } - } - - if err := c.setLoginCredentials(password); err != nil { - return false, errors.Wrap(err, "failed to set login credentials for the ceph dashboard") - } - - return false, nil -} - -func (c *Cluster) createSelfSignedCert() (bool, error) { - // create a self-signed cert for the https connections - args := []string{"dashboard", "create-self-signed-cert"} - - // retry a few times in the case that the mgr module is not ready to accept commands - for i := 0; i < 5; i++ { - _, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandsTimeout) - if err == context.DeadlineExceeded { - logger.Warning("cert creation timed out. trying again") - continue - } - if err != nil { - exitCode, parsed := c.exitCode(err) - if parsed { - if exitCode == certAlreadyConfiguredErrorCode { - logger.Info("dashboard is already initialized with a cert") - return true, nil - } - if exitCode == invalidArgErrorCode { - logger.Info("dashboard module is not ready yet. 
trying again") - time.Sleep(dashboardInitWaitTime) - continue - } - } - return false, errors.Wrap(err, "failed to create self signed cert on mgr") - } - break - } - return false, nil -} - -// FileBasedPasswordSupported check if Ceph versions have the latest Ceph dashboard command -func FileBasedPasswordSupported(c *client.ClusterInfo) bool { - if (c.CephVersion.IsNautilus() && c.CephVersion.IsAtLeast(cephver.CephVersion{Major: 14, Minor: 2, Extra: 17})) || - (c.CephVersion.IsOctopus() && c.CephVersion.IsAtLeast(cephver.CephVersion{Major: 15, Minor: 2, Extra: 10})) || - c.CephVersion.IsAtLeastPacific() { - return true - } - return false -} - -func CreateTempPasswordFile(password string) (*os.File, error) { - // Generate a temp file - file, err := ioutil.TempFile("", "") - if err != nil { - return nil, errors.Wrap(err, "failed to generate temp file") - } - - // Write password into file - err = ioutil.WriteFile(file.Name(), []byte(password), 0440) - if err != nil { - return nil, errors.Wrap(err, "failed to write dashboard password into file") - } - return file, nil -} - -func (c *Cluster) setLoginCredentials(password string) error { - // Set the login credentials. Write the command/args to the debug log so we don't write the password by default to the log. - logger.Infof("setting ceph dashboard %q login creds", dashboardUsername) - - var args []string - // for latest Ceph versions - if FileBasedPasswordSupported(c.clusterInfo) { - // Generate a temp file - file, err := CreateTempPasswordFile(password) - if err != nil { - return errors.Wrap(err, "failed to create a temporary dashboard password file") - } - args = []string{"dashboard", "ac-user-create", dashboardUsername, "-i", file.Name(), "administrator"} - defer func() { - if err := os.Remove(file.Name()); err != nil { - logger.Errorf("failed to clean up dashboard password file %q. 
%v", file.Name(), err) - } - }() - } else { - // for older Ceph versions - args = []string{"dashboard", "set-login-credentials", dashboardUsername, password} - } - - _, err := client.ExecuteCephCommandWithRetry(func() (string, []byte, error) { - output, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandsTimeout) - return "set dashboard creds", output, err - }, c.exitCode, 5, invalidArgErrorCode, dashboardInitWaitTime) - if err != nil { - return errors.Wrap(err, "failed to set login creds on mgr") - } - - logger.Info("successfully set ceph dashboard creds") - return nil -} - -func (c *Cluster) getOrGenerateDashboardPassword() (string, error) { - ctx := context.TODO() - secret, err := c.context.Clientset.CoreV1().Secrets(c.clusterInfo.Namespace).Get(ctx, dashboardPasswordName, metav1.GetOptions{}) - if err == nil { - logger.Info("the dashboard secret was already generated") - return decodeSecret(secret) - } - if !kerrors.IsNotFound(err) { - return "", errors.Wrap(err, "failed to get dashboard secret") - } - - // Generate a password - password, err := GeneratePassword(passwordLength) - if err != nil { - return "", errors.Wrap(err, "failed to generate password") - } - - // Store the keyring in a secret - secrets := map[string][]byte{ - passwordKeyName: []byte(password), - } - secret = &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: dashboardPasswordName, - Namespace: c.clusterInfo.Namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - err = c.clusterInfo.OwnerInfo.SetControllerReference(secret) - if err != nil { - return "", errors.Wrapf(err, "failed to set owner reference to dashboard secret %q", secret.Name) - } - - _, err = c.context.Clientset.CoreV1().Secrets(c.clusterInfo.Namespace).Create(ctx, secret, metav1.CreateOptions{}) - if err != nil { - return "", errors.Wrap(err, "failed to save dashboard secret") - } - return password, nil -} - -func GeneratePassword(length int) (string, error) { - // #nosec because of the word password - const passwordChars = "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~" - passwd, err := GenerateRandomBytes(length) - if err != nil { - return "", errors.Wrap(err, "failed to generate password") - } - for i, pass := range passwd { - passwd[i] = passwordChars[pass%byte(len(passwordChars))] - } - return string(passwd), nil -} - -// GenerateRandomBytes returns securely generated random bytes. 
-func GenerateRandomBytes(length int) ([]byte, error) { - bytes := make([]byte, length) - if _, err := rand.Read(bytes); err != nil { - return nil, errors.Wrap(err, "failed to generate random bytes") - } - return bytes, nil -} - -func decodeSecret(secret *v1.Secret) (string, error) { - password, ok := secret.Data[passwordKeyName] - if !ok { - return "", errors.New("password not found in secret") - } - return string(password), nil -} - -func (c *Cluster) restartDashboard() error { - logger.Info("restarting the mgr module") - if err := client.MgrDisableModule(c.context, c.clusterInfo, dashboardModuleName); err != nil { - return errors.Wrapf(err, "failed to disable mgr module %q.", dashboardModuleName) - } - if err := client.MgrEnableModule(c.context, c.clusterInfo, dashboardModuleName, true); err != nil { - return errors.Wrapf(err, "failed to enable mgr module %q.", dashboardModuleName) - } - return nil -} diff --git a/pkg/operator/ceph/cluster/mgr/dashboard_test.go b/pkg/operator/ceph/cluster/mgr/dashboard_test.go deleted file mode 100644 index 9cc2b7c13..000000000 --- a/pkg/operator/ceph/cluster/mgr/dashboard_test.go +++ /dev/null @@ -1,186 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package mgr - -import ( - "context" - "testing" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestGeneratePassword(t *testing.T) { - password, err := GeneratePassword(0) - require.Nil(t, err) - assert.Equal(t, "", password) - - password, err = GeneratePassword(1) - require.Nil(t, err) - assert.Equal(t, 1, len(password)) - logger.Infof("password: %s", password) - - password, err = GeneratePassword(10) - require.Nil(t, err) - assert.Equal(t, 10, len(password)) - logger.Infof("password: %s", password) -} - -func TestGetOrGeneratePassword(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - clusterInfo := &cephclient.ClusterInfo{Namespace: "myns", OwnerInfo: ownerInfo} - c := &Cluster{context: &clusterd.Context{Clientset: clientset}, clusterInfo: clusterInfo} - _, err := c.context.Clientset.CoreV1().Secrets(clusterInfo.Namespace).Get(ctx, dashboardPasswordName, metav1.GetOptions{}) - assert.True(t, kerrors.IsNotFound(err)) - - // Generate a password - password, err := c.getOrGenerateDashboardPassword() - require.Nil(t, err) - assert.Equal(t, passwordLength, len(password)) - - secret, err := c.context.Clientset.CoreV1().Secrets(clusterInfo.Namespace).Get(ctx, dashboardPasswordName, metav1.GetOptions{}) - 
assert.Nil(t, err) - assert.Equal(t, 1, len(secret.Data)) - passwordFromSecret, err := decodeSecret(secret) - assert.NoError(t, err) - assert.Equal(t, password, passwordFromSecret) - - // We should retrieve the same password on the second call - retrievedPassword, err := c.getOrGenerateDashboardPassword() - assert.Nil(t, err) - assert.Equal(t, password, retrievedPassword) -} - -func TestStartSecureDashboard(t *testing.T) { - ctx := context.TODO() - enables := 0 - disables := 0 - moduleRetries := 0 - exitCodeResponse := 0 - clientset := test.New(t, 3) - mockFN := func(command string, args ...string) (string, error) { - logger.Infof("command: %s %v", command, args) - exitCodeResponse = 0 - if args[1] == "module" { - if args[2] == "enable" { - enables++ - } else if args[2] == "disable" { - disables++ - } - } - if args[0] == "dashboard" && args[1] == "create-self-signed-cert" { - if moduleRetries < 2 { - logger.Infof("simulating retry...") - exitCodeResponse = invalidArgErrorCode - moduleRetries++ - return "", errors.New("test failure") - } - } - return "", nil - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: mockFN, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, arg ...string) (string, error) { - return mockFN(command, arg...) - }, - } - - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "myns", - CephVersion: cephver.Nautilus, - OwnerInfo: ownerInfo, - } - c := &Cluster{clusterInfo: clusterInfo, context: &clusterd.Context{Clientset: clientset, Executor: executor}, - spec: cephv1.ClusterSpec{ - Dashboard: cephv1.DashboardSpec{Port: dashboardPortHTTP, Enabled: true, SSL: true}, - CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v15"}, - }, - } - c.exitCode = func(err error) (int, bool) { - if exitCodeResponse != 0 { - return exitCodeResponse, true - } - return exitCodeResponse, false - } - - dashboardInitWaitTime = 0 - err := c.configureDashboardService("a") - assert.NoError(t, err) - err = c.configureDashboardModules() - assert.NoError(t, err) - // the dashboard is enabled once with the new dashboard and modules - assert.Equal(t, 2, enables) - assert.Equal(t, 1, disables) - assert.Equal(t, 2, moduleRetries) - - svc, err := c.context.Clientset.CoreV1().Services(clusterInfo.Namespace).Get(ctx, "rook-ceph-mgr-dashboard", metav1.GetOptions{}) - assert.Nil(t, err) - assert.NotNil(t, svc) - - // disable the dashboard - c.spec.Dashboard.Enabled = false - err = c.configureDashboardService("a") - assert.Nil(t, err) - err = c.configureDashboardModules() - assert.NoError(t, err) - assert.Equal(t, 2, enables) - assert.Equal(t, 2, disables) - - svc, err = c.context.Clientset.CoreV1().Services(clusterInfo.Namespace).Get(ctx, "rook-ceph-mgr-dashboard", metav1.GetOptions{}) - assert.NotNil(t, err) - assert.True(t, kerrors.IsNotFound(err)) - assert.Nil(t, svc) -} - -func TestFileBasedPasswordSupported(t *testing.T) { - // for Ceph version Nautilus 14.2.17 - clusterInfo := &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 17}} - value := FileBasedPasswordSupported(clusterInfo) - assert.True(t, value) - - // for Ceph version Octopus 15.2.10 - clusterInfo = &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 10}} - value = FileBasedPasswordSupported(clusterInfo) - assert.True(t, value) - - // for Ceph version Pacific - clusterInfo = &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 0, Extra: 
0}} - value = FileBasedPasswordSupported(clusterInfo) - assert.True(t, value) - - // for Ceph version Quincy - clusterInfo = &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}} - value = FileBasedPasswordSupported(clusterInfo) - assert.True(t, value) - - // for other Ceph Versions - clusterInfo = &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 15}} - value = FileBasedPasswordSupported(clusterInfo) - assert.False(t, value) -} diff --git a/pkg/operator/ceph/cluster/mgr/mgr.go b/pkg/operator/ceph/cluster/mgr/mgr.go deleted file mode 100644 index e70108b78..000000000 --- a/pkg/operator/ceph/cluster/mgr/mgr.go +++ /dev/null @@ -1,533 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mgr for the Ceph manager. -package mgr - -import ( - "context" - "fmt" - "path" - "strconv" - "strings" - - "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - v1 "k8s.io/api/apps/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-mgr") - -var prometheusRuleName = "prometheus-ceph-vVERSION-rules" - -// PrometheusExternalRuleName is the name of the prometheus external rule -var PrometheusExternalRuleName = "prometheus-ceph-vVERSION-rules-external" - -const ( - AppName = "rook-ceph-mgr" - serviceAccountName = "rook-ceph-mgr" - maxMgrCount = 2 - PrometheusModuleName = "prometheus" - crashModuleName = "crash" - PgautoscalerModuleName = "pg_autoscaler" - balancerModuleName = "balancer" - balancerModuleMode = "upmap" - monitoringPath = "/etc/ceph-monitoring/" - serviceMonitorFile = "service-monitor.yaml" - // minimum amount of memory in MB to run the pod - cephMgrPodMinimumMemory uint64 = 512 - // DefaultMetricsPort prometheus exporter port - DefaultMetricsPort uint16 = 9283 -) - -// Cluster represents the Rook and environment configuration settings needed to set up Ceph mgrs. 
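The tests above exercise the version gate in FileBasedPasswordSupported without showing the comparison itself. A compressed sketch of that gate, using a hypothetical three-field stand-in for cephver.CephVersion, might look like:

// Simplified stand-in for the cephver.CephVersion comparison; the real type
// lives in pkg/operator/ceph/version and carries more fields.
package example

type cephVersion struct {
	Major, Minor, Extra int
}

func (v cephVersion) isAtLeast(o cephVersion) bool {
	if v.Major != o.Major {
		return v.Major > o.Major
	}
	if v.Minor != o.Minor {
		return v.Minor > o.Minor
	}
	return v.Extra >= o.Extra
}

// fileBasedPasswordSupported mirrors the rule tested above: Nautilus needs at
// least 14.2.17, Octopus at least 15.2.10, and Pacific (16) or newer always
// supports `ceph dashboard ac-user-create -i <file>`.
func fileBasedPasswordSupported(v cephVersion) bool {
	switch v.Major {
	case 14:
		return v.isAtLeast(cephVersion{14, 2, 17})
	case 15:
		return v.isAtLeast(cephVersion{15, 2, 10})
	default:
		return v.Major >= 16
	}
}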
-type Cluster struct { - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - rookVersion string - exitCode func(err error) (int, bool) - spec cephv1.ClusterSpec -} - -// New creates an instance of the mgr -func New(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, spec cephv1.ClusterSpec, rookVersion string) *Cluster { - return &Cluster{ - context: context, - clusterInfo: clusterInfo, - spec: spec, - rookVersion: rookVersion, - exitCode: exec.ExitStatus, - } -} - -var waitForDeploymentToStart = k8sutil.WaitForDeploymentToStart -var updateDeploymentAndWait = mon.UpdateCephDeploymentAndWait - -// for backward compatibility, default to 1 mgr -func (c *Cluster) getReplicas() int { - replicas := c.spec.Mgr.Count - if replicas == 0 { - replicas = 1 - } - return replicas -} - -func (c *Cluster) getDaemonIDs() []string { - var daemonIDs []string - replicas := c.getReplicas() - if replicas > maxMgrCount { - replicas = maxMgrCount - } - for i := 0; i < replicas; i++ { - daemonIDs = append(daemonIDs, k8sutil.IndexToName(i)) - } - return daemonIDs -} - -// Start begins the process of running a cluster of Ceph mgrs. -func (c *Cluster) Start() error { - ctx := context.TODO() - // Validate pod's memory if specified - err := controller.CheckPodMemory(cephv1.ResourcesKeyMgr, cephv1.GetMgrResources(c.spec.Resources), cephMgrPodMinimumMemory) - if err != nil { - return errors.Wrap(err, "error checking pod memory") - } - - logger.Infof("start running mgr") - daemonIDs := c.getDaemonIDs() - var deploymentsToWaitFor []*v1.Deployment - - for _, daemonID := range daemonIDs { - // Check whether we need to cancel the orchestration - if err := controller.CheckForCancelledOrchestration(c.context); err != nil { - return err - } - - resourceName := fmt.Sprintf("%s-%s", AppName, daemonID) - mgrConfig := &mgrConfig{ - DaemonID: daemonID, - ResourceName: resourceName, - DataPathMap: config.NewStatelessDaemonDataPathMap(config.MgrType, daemonID, c.clusterInfo.Namespace, c.spec.DataDirHostPath), - } - - // We set the owner reference of the Secret to the Object controller instead of the replicaset - // because we watch for that resource and reconcile if anything happens to it - _, err := c.generateKeyring(mgrConfig) - if err != nil { - return errors.Wrapf(err, "failed to generate keyring for %q", resourceName) - } - - // start the deployment - d, err := c.makeDeployment(mgrConfig) - if err != nil { - return errors.Wrapf(err, "failed to create deployment") - } - - // Set the deployment hash as an annotation - err = patch.DefaultAnnotator.SetLastAppliedAnnotation(d) - if err != nil { - return errors.Wrapf(err, "failed to set annotation for deployment %q", d.Name) - } - - newDeployment, err := c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).Create(ctx, d, metav1.CreateOptions{}) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create mgr deployment %s", resourceName) - } - logger.Infof("deployment for mgr %s already exists. updating if needed", resourceName) - - if err := updateDeploymentAndWait(c.context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - logger.Errorf("failed to update mgr deployment %q. 
%v", resourceName, err) - } - } else { - // wait for the new deployment - deploymentsToWaitFor = append(deploymentsToWaitFor, newDeployment) - } - } - - // If the mgr is newly created, wait for it to start before continuing with the service and - // module configuration - for _, d := range deploymentsToWaitFor { - if err := waitForDeploymentToStart(c.context, d); err != nil { - return errors.Wrapf(err, "failed to wait for mgr %q to start", d.Name) - } - } - - // check if any extra mgrs need to be removed - c.removeExtraMgrs(daemonIDs) - - activeMgr := daemonIDs[0] - if len(daemonIDs) > 1 { - // When multiple mgrs are running, the mgr sidecar for the active mgr - // will create the services. However, the sidecar will only reconcile all - // the services when the active mgr changes. Here as part of the regular reconcile - // we trigger reconciling all the services to ensure they are current. - activeMgr, err = c.getActiveMgr() - if err != nil || activeMgr == "" { - activeMgr = "" - logger.Infof("cannot reconcile mgr services, no active mgr found. err=%v", err) - } - } - if activeMgr != "" { - if err := c.reconcileServices(activeMgr); err != nil { - return errors.Wrap(err, "failed to enable mgr services") - } - } - - // configure the mgr modules - c.configureModules(daemonIDs) - - // enable monitoring if `monitoring: enabled: true` - if c.spec.Monitoring.Enabled { - // namespace in which the prometheusRule should be deployed - // if left empty, it will be deployed in current namespace - namespace := c.spec.Monitoring.RulesNamespace - if namespace == "" { - namespace = c.clusterInfo.Namespace - } - if err := c.DeployPrometheusRule(prometheusRuleName, namespace); err != nil { - logger.Errorf("failed to deploy prometheus rule. %v", err) - } else { - logger.Infof("prometheusRule deployed") - } - logger.Debugf("ended monitoring deployment") - } - return nil -} - -func (c *Cluster) removeExtraMgrs(daemonIDs []string) { - // In case the mgr count was reduced, delete the extra mgrs - for i := maxMgrCount - 1; i >= len(daemonIDs); i-- { - mgrName := fmt.Sprintf("%s-%s", AppName, k8sutil.IndexToName(i)) - err := c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).Delete(context.TODO(), mgrName, metav1.DeleteOptions{}) - if err == nil { - logger.Infof("removed extra mgr %q", mgrName) - } else if !kerrors.IsNotFound(err) { - logger.Warningf("failed to remove extra mgr %q. %v", mgrName, err) - } - } -} - -// ReconcileActiveMgrServices reconciles the services if the active mgr is the one running -// in the sidecar -func (c *Cluster) ReconcileActiveMgrServices(daemonNameToUpdate string) error { - // If the services are already set to this daemon, no need to attempt to update - svc, err := c.context.Clientset.CoreV1().Services(c.clusterInfo.Namespace).Get(context.TODO(), AppName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("failed to check current mgr service, proceeding to update. 
%v", err) - } else { - currentDaemon := svc.Spec.Selector[controller.DaemonIDLabel] - if currentDaemon == daemonNameToUpdate { - logger.Infof("mgr services already set to daemon %q, no need to update", daemonNameToUpdate) - return nil - } - logger.Infof("mgr service currently set to %q, checking if need to update to %q", currentDaemon, daemonNameToUpdate) - } - - activeName, err := c.getActiveMgr() - if err != nil { - return err - } - if activeName == "" { - return errors.New("active mgr not found") - } - if daemonNameToUpdate != activeName { - logger.Infof("no need for the mgr update since the active mgr is %q, rather than the local mgr %q", activeName, daemonNameToUpdate) - return nil - } - - return c.reconcileServices(activeName) -} - -func (c *Cluster) getActiveMgr() (string, error) { - // The preferred way to query the active mgr is "ceph mgr stat", which is only available in pacific or newer - if c.clusterInfo.CephVersion.IsAtLeastPacific() { - mgrStat, err := cephclient.CephMgrStat(c.context, c.clusterInfo) - if err != nil { - return "", errors.Wrap(err, "failed to get mgr stat for the active mgr") - } - return mgrStat.ActiveName, nil - } - - // The legacy way to query the active mgr is with the verbose "ceph mgr dump" - mgrMap, err := cephclient.CephMgrMap(c.context, c.clusterInfo) - if err != nil { - return "", errors.Wrap(err, "failed to get mgr map for the active mgr") - } - - return mgrMap.ActiveName, nil -} - -// reconcile the services, if the active mgr is not detected, use the default mgr -func (c *Cluster) reconcileServices(activeDaemon string) error { - logger.Infof("setting services to point to mgr %q", activeDaemon) - - if err := c.configureDashboardService(activeDaemon); err != nil { - return errors.Wrap(err, "failed to configure dashboard svc") - } - - // create the metrics service - service, err := c.MakeMetricsService(AppName, activeDaemon, serviceMetricName) - if err != nil { - return err - } - if _, err := k8sutil.CreateOrUpdateService(c.context.Clientset, c.clusterInfo.Namespace, service); err != nil { - return errors.Wrap(err, "failed to create mgr metrics service") - } - - // enable monitoring if `monitoring: enabled: true` - if c.spec.Monitoring.Enabled { - if err := c.EnableServiceMonitor(activeDaemon); err != nil { - return errors.Wrap(err, "failed to enable service monitor") - } - } - - return nil -} - -func (c *Cluster) configureModules(daemonIDs []string) { - // Configure the modules asynchronously so we can complete all the configuration much sooner. - startModuleConfiguration("prometheus", c.enablePrometheusModule) - startModuleConfiguration("dashboard", c.configureDashboardModules) - // "crash" is part of the "always_on_modules" list as of Octopus - if !c.clusterInfo.CephVersion.IsAtLeastOctopus() { - startModuleConfiguration("crash", c.enableCrashModule) - } else { - // The balancer module must be configured on Octopus - // It is a bit confusing but as of Octopus modules that are in the "always_on_modules" list - // are "just" enabled, but still they must be configured to work properly - startModuleConfiguration("balancer", c.enableBalancerModule) - } - startModuleConfiguration("mgr module(s) from the spec", c.configureMgrModules) -} - -func startModuleConfiguration(description string, configureModules func() error) { - go func() { - err := configureModules() - if err != nil { - logger.Errorf("failed modules: %q. 
%v", description, err) - } else { - logger.Infof("successful modules: %s", description) - } - }() -} - -// Ceph docs about the prometheus module: http://docs.ceph.com/docs/master/mgr/prometheus/ -func (c *Cluster) enablePrometheusModule() error { - if err := cephclient.MgrEnableModule(c.context, c.clusterInfo, PrometheusModuleName, true); err != nil { - return errors.Wrap(err, "failed to enable mgr prometheus module") - } - return nil -} - -// Ceph docs about the crash module: https://docs.ceph.com/docs/master/mgr/crash/ -func (c *Cluster) enableCrashModule() error { - if err := cephclient.MgrEnableModule(c.context, c.clusterInfo, crashModuleName, true); err != nil { - return errors.Wrap(err, "failed to enable mgr crash module") - } - return nil -} - -func (c *Cluster) enableBalancerModule() error { - // The order MATTERS, always configure this module first, then turn it on - - // This sets min compat client to luminous and the balancer module mode - err := cephclient.ConfigureBalancerModule(c.context, c.clusterInfo, balancerModuleMode) - if err != nil { - return errors.Wrapf(err, "failed to configure module %q", balancerModuleName) - } - - // This turns "on" the balancer - err = cephclient.MgrEnableModule(c.context, c.clusterInfo, balancerModuleName, false) - if err != nil { - return errors.Wrapf(err, "failed to turn on mgr %q module", balancerModuleName) - } - - return nil -} - -func (c *Cluster) configureMgrModules() error { - // Enable mgr modules from the spec - for _, module := range c.spec.Mgr.Modules { - if module.Name == "" { - return errors.New("name not specified for the mgr module configuration") - } - if wellKnownModule(module.Name) { - return errors.Errorf("cannot configure mgr module %q that is configured with other cluster settings", module.Name) - } - minVersion, versionOK := c.moduleMeetsMinVersion(module.Name) - if !versionOK { - return errors.Errorf("module %q cannot be configured because it requires at least Ceph version %q", module.Name, minVersion.String()) - } - - if module.Enabled { - if module.Name == balancerModuleName { - // Configure balancer module mode - err := cephclient.ConfigureBalancerModule(c.context, c.clusterInfo, balancerModuleMode) - if err != nil { - return errors.Wrapf(err, "failed to configure module %q", module.Name) - } - } - - if err := cephclient.MgrEnableModule(c.context, c.clusterInfo, module.Name, false); err != nil { - return errors.Wrapf(err, "failed to enable mgr module %q", module.Name) - } - - // Configure special settings for individual modules that are enabled - switch module.Name { - case PgautoscalerModuleName: - monStore := config.GetMonStore(c.context, c.clusterInfo) - // Ceph Octopus will have that option enabled - err := monStore.Set("global", "osd_pool_default_pg_autoscale_mode", "on") - if err != nil { - return errors.Wrap(err, "failed to enable pg autoscale mode for newly created pools") - } - err = monStore.Set("global", "mon_pg_warn_min_per_osd", "0") - if err != nil { - return errors.Wrap(err, "failed to set minimal number PGs per (in) osd before we warn the admin to") - } - case rookModuleName: - startModuleConfiguration("orchestrator modules", c.configureOrchestratorModules) - } - - } else { - if err := cephclient.MgrDisableModule(c.context, c.clusterInfo, module.Name); err != nil { - return errors.Wrapf(err, "failed to disable mgr module %q", module.Name) - } - } - } - - return nil -} - -func (c *Cluster) moduleMeetsMinVersion(name string) (*cephver.CephVersion, bool) { - minVersions := map[string]cephver.CephVersion{ 
- // Put the modules here, example: - // pgautoscalerModuleName: {Major: 14}, - } - if ver, ok := minVersions[name]; ok { - // Check if the required min version is met - return &ver, c.clusterInfo.CephVersion.IsAtLeast(ver) - } - // no min version was required - return nil, true -} - -func wellKnownModule(name string) bool { - knownModules := []string{dashboardModuleName, PrometheusModuleName, crashModuleName} - for _, known := range knownModules { - if name == known { - return true - } - } - return false -} - -// EnableServiceMonitor add a servicemonitor that allows prometheus to scrape from the monitoring endpoint of the cluster -func (c *Cluster) EnableServiceMonitor(activeDaemon string) error { - serviceMonitor, err := k8sutil.GetServiceMonitor(path.Join(monitoringPath, serviceMonitorFile)) - if err != nil { - return errors.Wrap(err, "service monitor could not be enabled") - } - serviceMonitor.SetName(AppName) - serviceMonitor.SetNamespace(c.clusterInfo.Namespace) - cephv1.GetMonitoringLabels(c.spec.Labels).ApplyToObjectMeta(&serviceMonitor.ObjectMeta) - - if c.spec.External.Enable { - serviceMonitor.Spec.Endpoints[0].Port = controller.ServiceExternalMetricName - } - err = c.clusterInfo.OwnerInfo.SetControllerReference(serviceMonitor) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to service monitor %q", serviceMonitor.Name) - } - serviceMonitor.Spec.NamespaceSelector.MatchNames = []string{c.clusterInfo.Namespace} - serviceMonitor.Spec.Selector.MatchLabels = c.selectorLabels(activeDaemon) - - applyMonitoringLabels(c, serviceMonitor) - - if _, err = k8sutil.CreateOrUpdateServiceMonitor(serviceMonitor); err != nil { - return errors.Wrap(err, "service monitor could not be enabled") - } - return nil -} - -// DeployPrometheusRule deploy prometheusRule that adds alerting and/or recording rules to the cluster -func (c *Cluster) DeployPrometheusRule(name, namespace string) error { - version := strconv.Itoa(c.clusterInfo.CephVersion.Major) - name = strings.Replace(name, "VERSION", version, 1) - prometheusRuleFile := name + ".yaml" - prometheusRuleFile = path.Join(monitoringPath, prometheusRuleFile) - prometheusRule, err := k8sutil.GetPrometheusRule(prometheusRuleFile) - if err != nil { - return errors.Wrap(err, "prometheus rule could not be deployed") - } - prometheusRule.SetName(name) - prometheusRule.SetNamespace(namespace) - err = c.clusterInfo.OwnerInfo.SetControllerReference(prometheusRule) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to prometheus rule %q", prometheusRule.Name) - } - cephv1.GetMonitoringLabels(c.spec.Labels).ApplyToObjectMeta(&prometheusRule.ObjectMeta) - if _, err := k8sutil.CreateOrUpdatePrometheusRule(prometheusRule); err != nil { - return errors.Wrap(err, "prometheus rule could not be deployed") - } - return nil -} - -// IsModuleInSpec returns whether a module is present in the CephCluster manager spec -func IsModuleInSpec(modules []cephv1.Module, moduleName string) bool { - for _, v := range modules { - if v.Name == moduleName { - return true - } - } - - return false -} - -// ApplyMonitoringLabels function adds the name of the resource that manages -// cephcluster, as a label on the ceph metrics -func applyMonitoringLabels(c *Cluster, serviceMonitor *monitoringv1.ServiceMonitor) { - if c.spec.Labels != nil { - if monitoringLabels, ok := c.spec.Labels["monitoring"]; ok { - if managedBy, ok := monitoringLabels["rook.io/managedBy"]; ok { - relabelConfig := monitoringv1.RelabelConfig{ - TargetLabel: "managedBy", 
- Replacement: managedBy, - } - serviceMonitor.Spec.Endpoints[0].RelabelConfigs = append( - serviceMonitor.Spec.Endpoints[0].RelabelConfigs, &relabelConfig) - } else { - logger.Info("rook.io/managedBy not specified in monitoring labels") - } - } else { - logger.Info("monitoring labels not specified") - } - } -} diff --git a/pkg/operator/ceph/cluster/mgr/mgr_test.go b/pkg/operator/ceph/cluster/mgr/mgr_test.go deleted file mode 100644 index c2a5459ef..000000000 --- a/pkg/operator/ceph/cluster/mgr/mgr_test.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mgr - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - testopk8s "github.com/rook/rook/pkg/operator/k8sutil/test" - testop "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tevino/abool" - apps "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestStartMgr(t *testing.T) { - var deploymentsUpdated *[]*apps.Deployment - updateDeploymentAndWait, deploymentsUpdated = testopk8s.UpdateDeploymentAndWaitStub() - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Execute: %s %v", command, args) - if args[0] == "mgr" && args[1] == "stat" { - return `{"active_name": "a"}`, nil - } - return "{\"key\":\"mysecurekey\"}", nil - }, - } - waitForDeploymentToStart = func(clusterdContext *clusterd.Context, deployment *apps.Deployment) error { - logger.Infof("simulated mgr deployment starting") - return nil - } - - clientset := testop.New(t, 3) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - ctx := &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset, - RequestCancelOrchestration: abool.New()} - ownerInfo := cephclient.NewMinimumOwnerInfo(t) - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns", FSID: "myfsid", OwnerInfo: ownerInfo, CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Build: 5}} - clusterInfo.SetName("test") - clusterSpec := cephv1.ClusterSpec{ - Annotations: map[rook.KeyType]rook.Annotations{cephv1.KeyMgr: {"my": "annotation"}}, - Labels: map[rook.KeyType]rook.Labels{cephv1.KeyMgr: {"my-label-key": "value"}}, - Dashboard: cephv1.DashboardSpec{Enabled: true, SSL: true}, - Mgr: cephv1.MgrSpec{Count: 1}, - PriorityClassNames: map[rook.KeyType]string{cephv1.KeyMgr: "my-priority-class"}, - DataDirHostPath: "/var/lib/rook/", - } - c := New(ctx, 
clusterInfo, clusterSpec, "myversion") - defer os.RemoveAll(c.spec.DataDirHostPath) - - // start a basic service - err := c.Start() - assert.Nil(t, err) - validateStart(t, c) - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - c.spec.Dashboard.URLPrefix = "/test" - c.spec.Dashboard.Port = 12345 - err = c.Start() - assert.Nil(t, err) - validateStart(t, c) - assert.ElementsMatch(t, []string{"rook-ceph-mgr-a"}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // starting with more replicas - c.spec.Mgr.Count = 2 - c.spec.Dashboard.Enabled = false - // delete the previous mgr since the mocked test won't update the existing one - err = c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).Delete(context.TODO(), "rook-ceph-mgr-a", metav1.DeleteOptions{}) - assert.Nil(t, err) - err = c.Start() - assert.Nil(t, err) - validateStart(t, c) - - c.spec.Mgr.Count = 1 - c.spec.Dashboard.Enabled = false - // clean the previous deployments - err = c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).Delete(context.TODO(), "rook-ceph-mgr-a", metav1.DeleteOptions{}) - assert.Nil(t, err) - assert.Nil(t, err) - err = c.Start() - assert.Nil(t, err) - validateStart(t, c) -} - -func validateStart(t *testing.T, c *Cluster) { - mgrNames := []string{"a", "b"} - for i := 0; i < c.spec.Mgr.Count; i++ { - logger.Infof("Looking for cephmgr replica %d", i) - daemonName := mgrNames[i] - d, err := c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).Get(context.TODO(), fmt.Sprintf("rook-ceph-mgr-%s", daemonName), metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, map[string]string{"my": "annotation"}, d.Spec.Template.Annotations) - assert.Contains(t, d.Spec.Template.Labels, "my-label-key") - assert.Equal(t, "my-priority-class", d.Spec.Template.Spec.PriorityClassName) - if c.spec.Mgr.Count == 1 { - assert.Equal(t, 1, len(d.Spec.Template.Spec.Containers)) - } else { - // The sidecar container is only there when multiple mgrs are enabled - assert.Equal(t, 2, len(d.Spec.Template.Spec.Containers)) - assert.Equal(t, "watch-active", d.Spec.Template.Spec.Containers[1].Name) - } - } - - // verify we have exactly the expected number of deployments and not extra - // the expected deployments were already retrieved above, but now we check for no extra deployments - options := metav1.ListOptions{LabelSelector: "app=rook-ceph-mgr"} - deployments, err := c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).List(context.TODO(), options) - assert.NoError(t, err) - assert.Equal(t, c.spec.Mgr.Count, len(deployments.Items)) - - validateServices(t, c) -} - -func validateServices(t *testing.T, c *Cluster) { - _, err := c.context.Clientset.CoreV1().Services(c.clusterInfo.Namespace).Get(context.TODO(), "rook-ceph-mgr", metav1.GetOptions{}) - assert.Nil(t, err) - - ds, err := c.context.Clientset.CoreV1().Services(c.clusterInfo.Namespace).Get(context.TODO(), "rook-ceph-mgr-dashboard", metav1.GetOptions{}) - if c.spec.Dashboard.Enabled { - assert.NoError(t, err) - if c.spec.Dashboard.Port == 0 { - // port=0 -> default port - assert.Equal(t, ds.Spec.Ports[0].Port, int32(dashboardPortHTTPS)) - } else { - // non-zero ports are configured as-is - assert.Equal(t, ds.Spec.Ports[0].Port, int32(c.spec.Dashboard.Port)) - } - } else { - assert.True(t, errors.IsNotFound(err)) - } -} - -func TestMgrSidecarReconcile(t *testing.T) { - 
activeMgr := "a" - calledMgrStat := false - calledMgrDump := false - spec := cephv1.ClusterSpec{ - Mgr: cephv1.MgrSpec{Count: 1}, - Dashboard: cephv1.DashboardSpec{ - Enabled: true, - Port: 7000, - }, - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "dump" { - calledMgrDump = true - } else if args[1] == "stat" { - calledMgrStat = true - } - return fmt.Sprintf(`{"active_name":"%s"}`, activeMgr), nil - }, - } - clientset := testop.New(t, 3) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - ctx := &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset, - } - ownerInfo := cephclient.NewMinimumOwnerInfo(t) - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns", OwnerInfo: ownerInfo} - clusterInfo.SetName("test") - c := &Cluster{spec: spec, context: ctx, clusterInfo: clusterInfo} - - // Update services according to the active mgr - clusterInfo.CephVersion = cephver.CephVersion{Major: 15, Minor: 2, Build: 0} - err := c.ReconcileActiveMgrServices(activeMgr) - assert.NoError(t, err) - assert.False(t, calledMgrStat) - assert.True(t, calledMgrDump) - validateServices(t, c) - validateServiceMatches(t, c, "a") - - // nothing is created or updated when the requested mgr is not the active mgr - calledMgrDump = false - clusterInfo.CephVersion = cephver.CephVersion{Major: 16, Minor: 2, Build: 5} - err = c.ReconcileActiveMgrServices("b") - assert.NoError(t, err) - assert.True(t, calledMgrStat) - assert.False(t, calledMgrDump) - _, err = c.context.Clientset.CoreV1().Services(c.clusterInfo.Namespace).Get(context.TODO(), "rook-ceph-mgr", metav1.GetOptions{}) - assert.True(t, errors.IsNotFound(err)) - - // nothing is updated when the requested mgr is not the active mgr - activeMgr = "b" - err = c.ReconcileActiveMgrServices("b") - assert.NoError(t, err) - validateServices(t, c) - validateServiceMatches(t, c, "b") -} - -func validateServiceMatches(t *testing.T, c *Cluster, expectedActive string) { - // The service labels should match the active mgr - svc, err := c.context.Clientset.CoreV1().Services(c.clusterInfo.Namespace).Get(context.TODO(), "rook-ceph-mgr", metav1.GetOptions{}) - assert.NoError(t, err) - matchDaemon, ok := svc.Spec.Selector["ceph_daemon_id"] - assert.True(t, ok) - assert.Equal(t, expectedActive, matchDaemon) - - // clean up the service for the next test - err = c.context.Clientset.CoreV1().Services(c.clusterInfo.Namespace).Delete(context.TODO(), "rook-ceph-mgr", metav1.DeleteOptions{}) - assert.NoError(t, err) -} - -func TestConfigureModules(t *testing.T) { - modulesEnabled := 0 - modulesDisabled := 0 - configSettings := map[string]string{} - lastModuleConfigured := "" - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if command == "ceph" && len(args) > 3 { - if args[0] == "mgr" && args[1] == "module" { - if args[2] == "enable" { - modulesEnabled++ - } - if args[2] == "disable" { - modulesDisabled++ - } - lastModuleConfigured = args[3] - } - if args[0] == "config" && args[1] == "set" && args[2] == "global" { - configSettings[args[3]] = args[4] - } - } - return "", nil //return "{\"key\":\"mysecurekey\"}", nil - }, - } - - clientset := testop.New(t, 3) - context := &clusterd.Context{Executor: executor, Clientset: clientset} - clusterInfo := 
&cephclient.ClusterInfo{Namespace: "ns"} - c := &Cluster{ - context: context, - clusterInfo: clusterInfo, - } - - // one module without any special configuration - c.spec.Mgr.Modules = []cephv1.Module{ - {Name: "mymodule", Enabled: true}, - } - assert.NoError(t, c.configureMgrModules()) - assert.Equal(t, 1, modulesEnabled) - assert.Equal(t, 0, modulesDisabled) - assert.Equal(t, "mymodule", lastModuleConfigured) - - // one module that has a min version that is not met - c.spec.Mgr.Modules = []cephv1.Module{ - {Name: "pg_autoscaler", Enabled: true}, - } - - // one module that has a min version that is met - c.spec.Mgr.Modules = []cephv1.Module{ - {Name: "pg_autoscaler", Enabled: true}, - } - c.clusterInfo.CephVersion = cephver.CephVersion{Major: 14} - modulesEnabled = 0 - assert.NoError(t, c.configureMgrModules()) - assert.Equal(t, 1, modulesEnabled) - assert.Equal(t, 0, modulesDisabled) - assert.Equal(t, "pg_autoscaler", lastModuleConfigured) - assert.Equal(t, 2, len(configSettings)) - assert.Equal(t, "on", configSettings["osd_pool_default_pg_autoscale_mode"]) - assert.Equal(t, "0", configSettings["mon_pg_warn_min_per_osd"]) - - // disable the module - modulesEnabled = 0 - lastModuleConfigured = "" - configSettings = map[string]string{} - c.spec.Mgr.Modules[0].Enabled = false - assert.NoError(t, c.configureMgrModules()) - assert.Equal(t, 0, modulesEnabled) - assert.Equal(t, 1, modulesDisabled) - assert.Equal(t, "pg_autoscaler", lastModuleConfigured) - assert.Equal(t, 0, len(configSettings)) -} - -func TestMgrDaemons(t *testing.T) { - spec := cephv1.ClusterSpec{ - Mgr: cephv1.MgrSpec{Count: 1}, - } - c := &Cluster{spec: spec} - daemons := c.getDaemonIDs() - require.Equal(t, 1, len(daemons)) - assert.Equal(t, "a", daemons[0]) - - c.spec.Mgr.Count = 2 - daemons = c.getDaemonIDs() - require.Equal(t, 2, len(daemons)) - assert.Equal(t, "a", daemons[0]) - assert.Equal(t, "b", daemons[1]) -} - -func TestApplyMonitoringLabels(t *testing.T) { - clusterSpec := cephv1.ClusterSpec{ - Labels: cephv1.LabelsSpec{}, - } - c := &Cluster{spec: clusterSpec} - sm := &monitoringv1.ServiceMonitor{Spec: monitoringv1.ServiceMonitorSpec{ - Endpoints: []monitoringv1.Endpoint{{}}}} - - // Service Monitor RelabelConfigs updated when 'rook.io/managedBy' monitoring label is found - monitoringLabels := cephv1.LabelsSpec{ - cephv1.KeyMonitoring: map[string]string{ - "rook.io/managedBy": "storagecluster"}, - } - c.spec.Labels = monitoringLabels - applyMonitoringLabels(c, sm) - fmt.Printf("Hello1") - assert.Equal(t, "managedBy", sm.Spec.Endpoints[0].RelabelConfigs[0].TargetLabel) - assert.Equal(t, "storagecluster", sm.Spec.Endpoints[0].RelabelConfigs[0].Replacement) - - // Service Monitor RelabelConfigs not updated when the required monitoring label is not found - monitoringLabels = cephv1.LabelsSpec{ - cephv1.KeyMonitoring: map[string]string{ - "wrongLabelKey": "storagecluster"}, - } - c.spec.Labels = monitoringLabels - sm.Spec.Endpoints[0].RelabelConfigs = nil - applyMonitoringLabels(c, sm) - assert.Nil(t, sm.Spec.Endpoints[0].RelabelConfigs) - - // Service Monitor RelabelConfigs not updated when no monitoring labels are found - c.spec.Labels = cephv1.LabelsSpec{} - sm.Spec.Endpoints[0].RelabelConfigs = nil - applyMonitoringLabels(c, sm) - assert.Nil(t, sm.Spec.Endpoints[0].RelabelConfigs) -} diff --git a/pkg/operator/ceph/cluster/mgr/orchestrator.go b/pkg/operator/ceph/cluster/mgr/orchestrator.go deleted file mode 100644 index 42ce382e7..000000000 --- a/pkg/operator/ceph/cluster/mgr/orchestrator.go +++ /dev/null @@ -1,65 
+0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mgr for the Ceph manager. -package mgr - -import ( - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/util/exec" -) - -const ( - rookModuleName = "rook" - orchestratorOldCLIName = "orchestrator" - orchestratorNewCLIName = "orch" -) - -var ( - orchestratorInitWaitTime = 5 * time.Second - orchestratorCLIName = orchestratorOldCLIName -) - -// Ceph docs about the orchestrator modules: http://docs.ceph.com/docs/master/mgr/orchestrator_cli/ -func (c *Cluster) configureOrchestratorModules() error { - if err := client.MgrEnableModule(c.context, c.clusterInfo, rookModuleName, true); err != nil { - return errors.Wrap(err, "failed to enable mgr rook module") - } - if err := c.setRookOrchestratorBackend(); err != nil { - return errors.Wrap(err, "failed to set rook orchestrator backend") - } - return nil -} - -func (c *Cluster) setRookOrchestratorBackend() error { - if c.clusterInfo.CephVersion.IsAtLeastOctopus() { - orchestratorCLIName = orchestratorNewCLIName - } - // retry a few times in the case that the mgr module is not ready to accept commands - _, err := client.ExecuteCephCommandWithRetry(func() (string, []byte, error) { - args := []string{orchestratorCLIName, "set", "backend", "rook"} - output, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandsTimeout) - return "set rook backend", output, err - }, c.exitCode, 5, invalidArgErrorCode, orchestratorInitWaitTime) - if err != nil { - return errors.Wrap(err, "failed to set rook as the orchestrator backend") - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/mgr/orchestrator_test.go b/pkg/operator/ceph/cluster/mgr/orchestrator_test.go deleted file mode 100644 index 66cf8ab56..000000000 --- a/pkg/operator/ceph/cluster/mgr/orchestrator_test.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package mgr - -import ( - "testing" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/util/exec" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestOrchestratorModules(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - rookModuleEnabled := false - rookBackendSet := false - backendErrorCount := 0 - exec.CephCommandsTimeout = 15 * time.Second - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "mgr" && args[1] == "module" && args[2] == "enable" { - if args[3] == "rook" { - rookModuleEnabled = true - return "", nil - } - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - executor.MockExecuteCommandWithTimeout = func(timeout time.Duration, command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "orchestrator" && args[1] == "set" && args[2] == "backend" && args[3] == "rook" { - if backendErrorCount < 5 { - backendErrorCount++ - return "", errors.New("test simulation failure") - } - rookBackendSet = true - return "", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.Nautilus, - } - - c := &Cluster{clusterInfo: clusterInfo, context: context} - c.exitCode = func(err error) (int, bool) { - return invalidArgErrorCode, true - } - orchestratorInitWaitTime = 0 - - err := c.configureOrchestratorModules() - assert.Error(t, err) - err = c.setRookOrchestratorBackend() - assert.NoError(t, err) - assert.True(t, rookModuleEnabled) - assert.True(t, rookBackendSet) - assert.Equal(t, 5, backendErrorCount) - - // the rook module will succeed - err = c.configureOrchestratorModules() - assert.NoError(t, err) - err = c.setRookOrchestratorBackend() - assert.NoError(t, err) - assert.True(t, rookModuleEnabled) - assert.True(t, rookBackendSet) - - // Simulate the error because of the CLI name change - c.clusterInfo.CephVersion = cephver.Octopus - err = c.setRookOrchestratorBackend() - assert.Error(t, err) - executor.MockExecuteCommandWithTimeout = func(timeout time.Duration, command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "orch" && args[1] == "set" && args[2] == "backend" && args[3] == "rook" { - if backendErrorCount < 5 { - backendErrorCount++ - return "", errors.New("test simulation failure") - } - rookBackendSet = true - return "", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - err = c.setRookOrchestratorBackend() - assert.NoError(t, err) -} diff --git a/pkg/operator/ceph/cluster/mgr/spec.go b/pkg/operator/ceph/cluster/mgr/spec.go deleted file mode 100644 index 9e2114026..000000000 --- a/pkg/operator/ceph/cluster/mgr/spec.go +++ /dev/null @@ -1,370 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mgr - -import ( - "fmt" - "os" - "strconv" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - podIPEnvVar = "ROOK_POD_IP" - serviceMetricName = "http-metrics" -) - -func (c *Cluster) makeDeployment(mgrConfig *mgrConfig) (*apps.Deployment, error) { - logger.Debugf("mgrConfig: %+v", mgrConfig) - - volumes := controller.DaemonVolumes(mgrConfig.DataPathMap, mgrConfig.ResourceName) - if c.spec.Network.IsMultus() { - adminKeyringVol, _ := keyring.Volume().Admin(), keyring.VolumeMount().Admin() - volumes = append(volumes, adminKeyringVol) - } - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: mgrConfig.ResourceName, - Labels: c.getPodLabels(mgrConfig.DaemonID, true), - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - c.makeChownInitContainer(mgrConfig), - }, - Containers: []v1.Container{ - c.makeMgrDaemonContainer(mgrConfig), - }, - ServiceAccountName: serviceAccountName, - RestartPolicy: v1.RestartPolicyAlways, - Volumes: volumes, - HostNetwork: c.spec.Network.IsHost(), - PriorityClassName: cephv1.GetMgrPriorityClassName(c.spec.PriorityClassNames), - }, - } - cephv1.GetMgrPlacement(c.spec.Placement).ApplyToPodSpec(&podSpec.Spec) - - // Run the sidecar and require anti affinity only if there are multiple mgrs - if c.spec.Mgr.Count > 1 { - podSpec.Spec.Containers = append(podSpec.Spec.Containers, c.makeMgrSidecarContainer(mgrConfig)) - matchLabels := controller.AppLabels(AppName, c.clusterInfo.Namespace) - - // Stretch the mgrs across hosts by default, or across a bigger failure domain for stretch clusters - topologyKey := v1.LabelHostname - if c.spec.IsStretchCluster() { - topologyKey = mon.StretchFailureDomainLabel(c.spec) - } - k8sutil.SetNodeAntiAffinityForPod(&podSpec.Spec, !c.spec.Mgr.AllowMultiplePerNode, topologyKey, matchLabels, nil) - } - - // If the log collector is enabled we add the side-car container - if c.spec.LogCollector.Enabled { - shareProcessNamespace := true - podSpec.Spec.ShareProcessNamespace = &shareProcessNamespace - podSpec.Spec.Containers = append(podSpec.Spec.Containers, *controller.LogCollectorContainer(fmt.Sprintf("ceph-mgr.%s", mgrConfig.DaemonID), c.clusterInfo.Namespace, c.spec)) - } - - // Replace default unreachable node toleration - k8sutil.AddUnreachableNodeToleration(&podSpec.Spec) - - if c.spec.Network.IsHost() { - podSpec.Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } else if c.spec.Network.IsMultus() { - if err := k8sutil.ApplyMultus(c.spec.Network, &podSpec.ObjectMeta); err != nil { - return nil, err - } - podSpec.Spec.Containers = append(podSpec.Spec.Containers, 
c.makeCmdProxySidecarContainer(mgrConfig)) - } - - cephv1.GetMgrAnnotations(c.spec.Annotations).ApplyToObjectMeta(&podSpec.ObjectMeta) - c.applyPrometheusAnnotations(&podSpec.ObjectMeta) - cephv1.GetMgrLabels(c.spec.Labels).ApplyToObjectMeta(&podSpec.ObjectMeta) - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: mgrConfig.ResourceName, - Namespace: c.clusterInfo.Namespace, - Labels: c.getPodLabels(mgrConfig.DaemonID, true), - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: c.getPodLabels(mgrConfig.DaemonID, false), - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - k8sutil.AddRookVersionLabelToDeployment(d) - cephv1.GetMgrLabels(c.spec.Labels).ApplyToObjectMeta(&d.ObjectMeta) - controller.AddCephVersionLabelToDeployment(c.clusterInfo.CephVersion, d) - err := c.clusterInfo.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to mgr deployment %q", d.Name) - } - return d, nil -} - -func (c *Cluster) makeChownInitContainer(mgrConfig *mgrConfig) v1.Container { - return controller.ChownCephDataDirsInitContainer( - *mgrConfig.DataPathMap, - c.spec.CephVersion.Image, - controller.DaemonVolumeMounts(mgrConfig.DataPathMap, mgrConfig.ResourceName), - cephv1.GetMgrResources(c.spec.Resources), - controller.PodSecurityContext(), - ) -} - -func (c *Cluster) makeMgrDaemonContainer(mgrConfig *mgrConfig) v1.Container { - - container := v1.Container{ - Name: "mgr", - Command: []string{ - "ceph-mgr", - }, - Args: append( - controller.DaemonFlags(c.clusterInfo, &c.spec, mgrConfig.DaemonID), - // for ceph-mgr cephfs - // see https://github.com/ceph/ceph-csi/issues/486 for more details - config.NewFlag("client-mount-uid", "0"), - config.NewFlag("client-mount-gid", "0"), - "--foreground", - ), - Image: c.spec.CephVersion.Image, - VolumeMounts: controller.DaemonVolumeMounts(mgrConfig.DataPathMap, mgrConfig.ResourceName), - Ports: []v1.ContainerPort{ - { - Name: "mgr", - ContainerPort: int32(6800), - Protocol: v1.ProtocolTCP, - }, - { - Name: "http-metrics", - ContainerPort: int32(DefaultMetricsPort), - Protocol: v1.ProtocolTCP, - }, - { - Name: "dashboard", - ContainerPort: int32(c.dashboardPort()), - Protocol: v1.ProtocolTCP, - }, - }, - Env: append( - controller.DaemonEnvVars(c.spec.CephVersion.Image), - c.cephMgrOrchestratorModuleEnvs()..., - ), - Resources: cephv1.GetMgrResources(c.spec.Resources), - SecurityContext: controller.PodSecurityContext(), - LivenessProbe: getDefaultMgrLivenessProbe(), - WorkingDir: config.VarLogCephDir, - } - - // If the liveness probe is enabled - container = config.ConfigureLivenessProbe(cephv1.KeyMgr, container, c.spec.HealthCheck) - - // If host networking is enabled, we don't need a bind addr that is different from the public addr - if !c.spec.Network.IsHost() { - // Opposite of the above, --public-bind-addr will *not* still advertise on the previous - // port, which makes sense because this is the pod IP, which changes with every new pod. 
- container.Args = append(container.Args, - config.NewFlag("public-addr", controller.ContainerEnvVarReference(podIPEnvVar))) - } - - return container -} - -func (c *Cluster) makeMgrSidecarContainer(mgrConfig *mgrConfig) v1.Container { - envVars := []v1.EnvVar{ - {Name: "ROOK_CLUSTER_ID", Value: string(c.clusterInfo.OwnerInfo.GetUID())}, - {Name: "ROOK_CLUSTER_NAME", Value: string(c.clusterInfo.NamespacedName().Name)}, - k8sutil.PodIPEnvVar(k8sutil.PrivateIPEnvVar), - k8sutil.PodIPEnvVar(k8sutil.PublicIPEnvVar), - mon.PodNamespaceEnvVar(c.clusterInfo.Namespace), - mon.EndpointEnvVar(), - mon.SecretEnvVar(), - mon.CephUsernameEnvVar(), - mon.CephSecretEnvVar(), - k8sutil.ConfigOverrideEnvVar(), - {Name: "ROOK_FSID", ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "rook-ceph-mon"}, - Key: "fsid", - }, - }}, - {Name: "ROOK_DASHBOARD_ENABLED", Value: strconv.FormatBool(c.spec.Dashboard.Enabled)}, - {Name: "ROOK_MONITORING_ENABLED", Value: strconv.FormatBool(c.spec.Monitoring.Enabled)}, - {Name: "ROOK_UPDATE_INTERVAL", Value: "15s"}, - {Name: "ROOK_DAEMON_NAME", Value: mgrConfig.DaemonID}, - {Name: "ROOK_CEPH_VERSION", Value: "ceph version " + c.clusterInfo.CephVersion.String()}, - } - - return v1.Container{ - Args: []string{"ceph", "mgr", "watch-active"}, - Name: "watch-active", - Image: c.rookVersion, - Env: envVars, - Resources: cephv1.GetMgrSidecarResources(c.spec.Resources), - } -} - -func (c *Cluster) makeCmdProxySidecarContainer(mgrConfig *mgrConfig) v1.Container { - _, adminKeyringVolMount := keyring.Volume().Admin(), keyring.VolumeMount().Admin() - container := v1.Container{ - Name: client.CommandProxyInitContainerName, - Command: []string{"sleep"}, - Args: []string{"infinity"}, - Image: c.spec.CephVersion.Image, - VolumeMounts: append(controller.DaemonVolumeMounts(mgrConfig.DataPathMap, mgrConfig.ResourceName), adminKeyringVolMount), - Env: append(controller.DaemonEnvVars(c.spec.CephVersion.Image), v1.EnvVar{Name: "CEPH_ARGS", Value: fmt.Sprintf("-m $(ROOK_CEPH_MON_HOST) -k %s", keyring.VolumeMount().AdminKeyringFilePath())}), - Resources: cephv1.GetMgrResources(c.spec.Resources), - SecurityContext: controller.PodSecurityContext(), - } - - return container -} - -func getDefaultMgrLivenessProbe() *v1.Probe { - return &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: "/", - Port: intstr.FromInt(int(DefaultMetricsPort)), - }, - }, - InitialDelaySeconds: 60, - } -} - -// MakeMetricsService generates the Kubernetes service object for the monitoring service -func (c *Cluster) MakeMetricsService(name, activeDaemon, servicePortMetricName string) (*v1.Service, error) { - labels := c.selectorLabels(activeDaemon) - - svc := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: c.clusterInfo.Namespace, - Labels: labels, - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeClusterIP, - Ports: []v1.ServicePort{ - { - Name: servicePortMetricName, - Port: int32(DefaultMetricsPort), - Protocol: v1.ProtocolTCP, - }, - }, - }, - } - - // If the cluster is external we don't need to add the selector - if name != controller.ExternalMgrAppName { - svc.Spec.Selector = labels - } - - err := c.clusterInfo.OwnerInfo.SetControllerReference(svc) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to monitoring service %q", svc.Name) - } - return svc, nil -} - -func (c *Cluster) makeDashboardService(name, activeDaemon string) (*v1.Service, error) { - labels := 
c.selectorLabels(activeDaemon) - - portName := "https-dashboard" - if !c.spec.Dashboard.SSL { - portName = "http-dashboard" - } - svc := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-dashboard", name), - Namespace: c.clusterInfo.Namespace, - Labels: labels, - }, - Spec: v1.ServiceSpec{ - Selector: labels, - Type: v1.ServiceTypeClusterIP, - Ports: []v1.ServicePort{ - { - Name: portName, - Port: int32(c.dashboardPort()), - Protocol: v1.ProtocolTCP, - }, - }, - }, - } - err := c.clusterInfo.OwnerInfo.SetControllerReference(svc) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to dashboard service %q", svc.Name) - } - return svc, nil -} - -func (c *Cluster) getPodLabels(daemonName string, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, "mgr", daemonName, includeNewLabels) - // leave "instance" key for legacy usage - labels["instance"] = daemonName - return labels -} - -func (c *Cluster) applyPrometheusAnnotations(objectMeta *metav1.ObjectMeta) { - if len(cephv1.GetMgrAnnotations(c.spec.Annotations)) == 0 { - t := rook.Annotations{ - "prometheus.io/scrape": "true", - "prometheus.io/port": strconv.Itoa(int(DefaultMetricsPort)), - } - - t.ApplyToObjectMeta(objectMeta) - } -} - -func (c *Cluster) cephMgrOrchestratorModuleEnvs() []v1.EnvVar { - operatorNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - envVars := []v1.EnvVar{ - {Name: "ROOK_OPERATOR_NAMESPACE", Value: operatorNamespace}, - {Name: "ROOK_CEPH_CLUSTER_CRD_VERSION", Value: cephv1.Version}, - {Name: "ROOK_CEPH_CLUSTER_CRD_NAME", Value: c.clusterInfo.NamespacedName().Name}, - k8sutil.PodIPEnvVar(podIPEnvVar), - } - return envVars -} - -func (c *Cluster) selectorLabels(activeDaemon string) map[string]string { - labels := controller.AppLabels(AppName, c.clusterInfo.Namespace) - if activeDaemon != "" { - labels[controller.DaemonIDLabel] = activeDaemon - } - return labels -} diff --git a/pkg/operator/ceph/cluster/mgr/spec_test.go b/pkg/operator/ceph/cluster/mgr/spec_test.go deleted file mode 100644 index 3f404c94c..000000000 --- a/pkg/operator/ceph/cluster/mgr/spec_test.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mgr - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/test" - optest "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" -) - -func TestPodSpec(t *testing.T) { - clientset := optest.New(t, 1) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns", FSID: "myfsid", OwnerInfo: ownerInfo} - clusterInfo.SetName("test") - clusterSpec := cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:myceph"}, - Dashboard: cephv1.DashboardSpec{Port: 1234}, - PriorityClassNames: map[rook.KeyType]string{cephv1.KeyMgr: "my-priority-class"}, - DataDirHostPath: "/var/lib/rook/", - Resources: cephv1.ResourceSpec{string(cephv1.KeyMgr): v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(200.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(500.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(250.0, resource.BinarySI), - }, - }, - }, - } - c := New(&clusterd.Context{Clientset: clientset}, clusterInfo, clusterSpec, "rook/rook:myversion") - - mgrTestConfig := mgrConfig{ - DaemonID: "a", - ResourceName: "rook-ceph-mgr-a", - DataPathMap: config.NewStatelessDaemonDataPathMap(config.MgrType, "a", "rook-ceph", "/var/lib/rook/"), - } - - t.Run("traditional deployment", func(t *testing.T) { - d, err := c.makeDeployment(&mgrTestConfig) - assert.NoError(t, err) - - // Deployment should have Ceph labels - test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MgrType, "a", AppName, "ns") - - podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) - podTemplate.Spec().Containers().RequireAdditionalEnvVars( - "ROOK_OPERATOR_NAMESPACE", "ROOK_CEPH_CLUSTER_CRD_VERSION", - "ROOK_CEPH_CLUSTER_CRD_NAME") - podTemplate.RunFullSuite(config.MgrType, "a", AppName, "ns", "quay.io/ceph/ceph:myceph", - "200", "100", "500", "250", /* resources */ - "my-priority-class") - assert.Equal(t, 2, len(d.Spec.Template.Annotations)) - assert.Equal(t, 1, len(d.Spec.Template.Spec.Containers)) - assert.Equal(t, 5, len(d.Spec.Template.Spec.Containers[0].VolumeMounts)) - }) - - t.Run("deployment with multus with new sidecar proxy command container", func(t *testing.T) { - c.spec.Network.Provider = "multus" - d, err := c.makeDeployment(&mgrTestConfig) - assert.NoError(t, err) - assert.Equal(t, 3, len(d.Spec.Template.Annotations)) // Multus annotations - assert.Equal(t, 2, len(d.Spec.Template.Spec.Containers)) // mgr pod + sidecar - assert.Equal(t, client.CommandProxyInitContainerName, d.Spec.Template.Spec.Containers[1].Name) // sidecar pod - assert.Equal(t, 6, len(d.Spec.Template.Spec.Containers[1].VolumeMounts)) // + admin keyring - assert.Equal(t, "CEPH_ARGS", d.Spec.Template.Spec.Containers[1].Env[len(d.Spec.Template.Spec.Containers[1].Env)-1].Name) // connection info to the cluster - assert.Equal(t, "-m $(ROOK_CEPH_MON_HOST) -k /etc/ceph/admin-keyring-store/keyring", 
d.Spec.Template.Spec.Containers[1].Env[len(d.Spec.Template.Spec.Containers[1].Env)-1].Value) // connection info to the cluster - }) -} - -func TestServiceSpec(t *testing.T) { - clientset := optest.New(t, 1) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns", FSID: "myfsid", OwnerInfo: ownerInfo} - clusterSpec := cephv1.ClusterSpec{} - c := New(&clusterd.Context{Clientset: clientset}, clusterInfo, clusterSpec, "myversion") - - s, err := c.MakeMetricsService("rook-mgr", "foo", serviceMetricName) - assert.NoError(t, err) - assert.NotNil(t, s) - assert.Equal(t, "rook-mgr", s.Name) - assert.Equal(t, 1, len(s.Spec.Ports)) - assert.Equal(t, 3, len(s.Labels)) - assert.Equal(t, 3, len(s.Spec.Selector)) - assert.Equal(t, "foo", s.Spec.Selector[controller.DaemonIDLabel]) -} - -func TestHostNetwork(t *testing.T) { - clientset := optest.New(t, 1) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns", FSID: "myfsid", OwnerInfo: ownerInfo} - clusterInfo.SetName("test") - clusterSpec := cephv1.ClusterSpec{ - Network: cephv1.NetworkSpec{HostNetwork: true}, - Dashboard: cephv1.DashboardSpec{Port: 1234}, - DataDirHostPath: "/var/lib/rook/", - } - c := New(&clusterd.Context{Clientset: clientset}, clusterInfo, clusterSpec, "myversion") - - mgrTestConfig := mgrConfig{ - DaemonID: "a", - ResourceName: "mgr-a", - DataPathMap: config.NewStatelessDaemonDataPathMap(config.MgrType, "a", "rook-ceph", "/var/lib/rook/"), - } - - d, err := c.makeDeployment(&mgrTestConfig) - assert.NoError(t, err) - assert.NotNil(t, d) - - assert.Equal(t, true, c.spec.Network.IsHost()) - assert.Equal(t, v1.DNSClusterFirstWithHostNet, d.Spec.Template.Spec.DNSPolicy) -} - -func TestApplyPrometheusAnnotations(t *testing.T) { - clientset := optest.New(t, 1) - clusterSpec := cephv1.ClusterSpec{ - DataDirHostPath: "/var/lib/rook/", - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns", FSID: "myfsid", OwnerInfo: ownerInfo} - clusterInfo.SetName("test") - c := New(&clusterd.Context{Clientset: clientset}, clusterInfo, clusterSpec, "myversion") - - mgrTestConfig := mgrConfig{ - DaemonID: "a", - ResourceName: "rook-ceph-mgr-a", - DataPathMap: config.NewStatelessDaemonDataPathMap(config.MgrType, "a", "rook-ceph", "/var/lib/rook/"), - } - - d, err := c.makeDeployment(&mgrTestConfig) - assert.NoError(t, err) - - // Test without annotations - c.applyPrometheusAnnotations(&d.ObjectMeta) - assert.Equal(t, 2, len(d.ObjectMeta.Annotations)) - - // Test with existing annotations - // applyPrometheusAnnotations() shouldn't do anything - // re-initialize "d" - d, err = c.makeDeployment(&mgrTestConfig) - assert.NoError(t, err) - - fakeAnnotations := rook.Annotations{ - "foo.io/bar": "foobar", - } - c.spec.Annotations = map[rook.KeyType]rook.Annotations{cephv1.KeyMgr: fakeAnnotations} - - c.applyPrometheusAnnotations(&d.ObjectMeta) - assert.Equal(t, 1, len(c.spec.Annotations)) - assert.Equal(t, 0, len(d.ObjectMeta.Annotations)) -} diff --git a/pkg/operator/ceph/cluster/mon/config.go b/pkg/operator/ceph/cluster/mon/config.go deleted file mode 100644 index 66be39ceb..000000000 --- a/pkg/operator/ceph/cluster/mon/config.go +++ /dev/null @@ -1,385 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/csi" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - "github.com/rook/rook/pkg/util/sys" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -const ( - // All mons share the same keyring - keyringStoreName = "rook-ceph-mons" - - // The final string field is for the admin keyring - keyringTemplate = ` -[mon.] - key = %s - caps mon = "allow *" - -%s` - - externalConnectionRetry = 60 * time.Second -) - -func (c *Cluster) genMonSharedKeyring() string { - return fmt.Sprintf( - keyringTemplate, - c.ClusterInfo.MonitorSecret, - cephclient.CephKeyring(c.ClusterInfo.CephCred), - ) -} - -// return mon data dir path relative to the dataDirHostPath given a mon's name -func dataDirRelativeHostPath(monName string) string { - monHostDir := monName // support legacy case where the mon name is "mon#" and not a lettered ID - if !strings.Contains(monName, "mon") { - // if the mon name doesn't have "mon" in it, mon dir is "mon-" - monHostDir = "mon-" + monName - } - // Keep existing behavior where Rook stores the mon's data in the "data" subdir - return path.Join(monHostDir, "data") -} - -// LoadClusterInfo constructs or loads a clusterinfo and returns it along with the maxMonID -func LoadClusterInfo(context *clusterd.Context, namespace string) (*cephclient.ClusterInfo, int, *Mapping, error) { - return CreateOrLoadClusterInfo(context, namespace, nil) -} - -// CreateOrLoadClusterInfo constructs or loads a clusterinfo and returns it along with the maxMonID -func CreateOrLoadClusterInfo(clusterdContext *clusterd.Context, namespace string, ownerInfo *k8sutil.OwnerInfo) (*cephclient.ClusterInfo, int, *Mapping, error) { - ctx := context.TODO() - var clusterInfo *cephclient.ClusterInfo - maxMonID := -1 - monMapping := &Mapping{ - Schedule: map[string]*MonScheduleInfo{}, - } - - secrets, err := clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, AppName, metav1.GetOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return nil, maxMonID, monMapping, errors.Wrap(err, "failed to get mon secrets") - } - if ownerInfo == nil { - return nil, maxMonID, monMapping, errors.New("not expected to create new cluster info and did not find existing secret") - } - - clusterInfo, err = createNamedClusterInfo(clusterdContext, namespace) - if err != nil { - return nil, maxMonID, monMapping, errors.Wrap(err, "failed to create mon secrets") - } - - err = createClusterAccessSecret(clusterdContext.Clientset, namespace, clusterInfo, ownerInfo) - if err != nil { - return nil, maxMonID, monMapping, err - } - } else { - clusterInfo = &cephclient.ClusterInfo{ - Namespace: namespace, - FSID: string(secrets.Data[fsidSecretNameKey]), - MonitorSecret: 
string(secrets.Data[monSecretNameKey]), - } - if cephUsername, ok := secrets.Data[cephUsernameKey]; ok { - clusterInfo.CephCred.Username = string(cephUsername) - clusterInfo.CephCred.Secret = string(secrets.Data[cephUserSecretKey]) - } else if adminSecretKey, ok := secrets.Data[adminSecretNameKey]; ok { - clusterInfo.CephCred.Username = cephclient.AdminUsername - clusterInfo.CephCred.Secret = string(adminSecretKey) - - secrets.Data[cephUsernameKey] = []byte(cephclient.AdminUsername) - secrets.Data[cephUserSecretKey] = adminSecretKey - if _, err = clusterdContext.Clientset.CoreV1().Secrets(namespace).Update(ctx, secrets, metav1.UpdateOptions{}); err != nil { - return nil, maxMonID, monMapping, errors.Wrap(err, "failed to update mon secrets") - } - } else { - return nil, maxMonID, monMapping, errors.New("failed to find either the cluster admin key or the username") - } - logger.Debugf("found existing monitor secrets for cluster %s", clusterInfo.Namespace) - } - - // get the existing monitor config - clusterInfo.Monitors, maxMonID, monMapping, err = loadMonConfig(clusterdContext.Clientset, namespace) - if err != nil { - return nil, maxMonID, monMapping, errors.Wrap(err, "failed to get mon config") - } - - // If an admin key was provided we don't need to load the other resources - // Some people might want to give the admin key - // The necessary users/keys/secrets will be created by Rook - // This is also done to allow backward compatibility - if clusterInfo.CephCred.Username == cephclient.AdminUsername && clusterInfo.CephCred.Secret != adminSecretNameKey { - return clusterInfo, maxMonID, monMapping, nil - } - - // If the admin secret is "admin-secret", look for the deprecated secret that has the external creds - if clusterInfo.CephCred.Secret == adminSecretNameKey { - secret, err := clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, OperatorCreds, metav1.GetOptions{}) - if err != nil { - return clusterInfo, maxMonID, monMapping, err - } - // Populate external credential - clusterInfo.CephCred.Username = string(secret.Data["userID"]) - clusterInfo.CephCred.Secret = string(secret.Data["userKey"]) - } - - if err := ValidateCephCSIConnectionSecrets(clusterdContext, namespace); err != nil { - return clusterInfo, maxMonID, monMapping, err - } - - return clusterInfo, maxMonID, monMapping, nil -} - -// ValidateCephCSIConnectionSecrets returns the secret value of the client health checker key -func ValidateCephCSIConnectionSecrets(clusterdContext *clusterd.Context, namespace string) error { - ctx := context.TODO() - _, err := clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, csi.CsiRBDNodeSecret, metav1.GetOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get %q secret", csi.CsiRBDNodeSecret) - } - } - - _, err = clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, csi.CsiRBDProvisionerSecret, metav1.GetOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get %q secret", csi.CsiRBDProvisionerSecret) - } - } - - _, err = clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, csi.CsiCephFSNodeSecret, metav1.GetOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get %q secret", csi.CsiCephFSNodeSecret) - } - } - - _, err = clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, csi.CsiCephFSProvisionerSecret, metav1.GetOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed 
to get %q secret", csi.CsiCephFSProvisionerSecret) - } - } - - return nil -} - -// WriteConnectionConfig save monitor connection config to disk -func WriteConnectionConfig(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo) error { - // write the latest config to the config dir - if _, err := cephclient.GenerateConnectionConfig(context, clusterInfo); err != nil { - return errors.Wrap(err, "failed to write connection config") - } - - return nil -} - -// loadMonConfig returns the monitor endpoints and maxMonID -func loadMonConfig(clientset kubernetes.Interface, namespace string) (map[string]*cephclient.MonInfo, int, *Mapping, error) { - ctx := context.TODO() - monEndpointMap := map[string]*cephclient.MonInfo{} - maxMonID := -1 - monMapping := &Mapping{ - Schedule: map[string]*MonScheduleInfo{}, - } - - cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, EndpointConfigMapName, metav1.GetOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return nil, maxMonID, monMapping, err - } - // If the config map was not found, initialize the empty set of monitors - return monEndpointMap, maxMonID, monMapping, nil - } - - // Parse the monitor List - if info, ok := cm.Data[EndpointDataKey]; ok { - monEndpointMap = ParseMonEndpoints(info) - } - - // Parse the max monitor id - storedMaxMonID := -1 - if id, ok := cm.Data[MaxMonIDKey]; ok { - storedMaxMonID, err = strconv.Atoi(id) - if err != nil { - logger.Errorf("invalid max mon id %q. %v", id, err) - } else { - maxMonID = storedMaxMonID - } - } - - // Make sure the max id is consistent with the current monitors - for _, m := range monEndpointMap { - id, _ := fullNameToIndex(m.Name) - if maxMonID < id { - maxMonID = id - } - } - if maxMonID != storedMaxMonID { - logger.Infof("updating obsolete maxMonID %d to actual value %d", storedMaxMonID, maxMonID) - } - - err = json.Unmarshal([]byte(cm.Data[MappingKey]), &monMapping) - if err != nil { - logger.Errorf("invalid JSON in mon mapping. 
%v", err) - } - - logger.Debugf("loaded: maxMonID=%d, mons=%+v, assignment=%+v", maxMonID, monEndpointMap, monMapping) - return monEndpointMap, maxMonID, monMapping, nil -} - -func createClusterAccessSecret(clientset kubernetes.Interface, namespace string, clusterInfo *cephclient.ClusterInfo, ownerInfo *k8sutil.OwnerInfo) error { - ctx := context.TODO() - logger.Infof("creating mon secrets for a new cluster") - var err error - - // store the secrets for internal usage of the rook pods - secrets := map[string][]byte{ - fsidSecretNameKey: []byte(clusterInfo.FSID), - monSecretNameKey: []byte(clusterInfo.MonitorSecret), - cephUsernameKey: []byte(clusterInfo.CephCred.Username), - cephUserSecretKey: []byte(clusterInfo.CephCred.Secret), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: AppName, - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - err = ownerInfo.SetControllerReference(secret) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to mon secret %q", secret.Name) - } - if _, err = clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil { - return errors.Wrap(err, "failed to save mon secrets") - } - - return nil -} - -// create new cluster info (FSID, shared keys) -func createNamedClusterInfo(context *clusterd.Context, namespace string) (*cephclient.ClusterInfo, error) { - fsid, err := uuid.NewRandom() - if err != nil { - return nil, err - } - - dir := path.Join(context.ConfigDir, namespace) - if err = os.MkdirAll(dir, 0744); err != nil { - return nil, errors.Wrapf(err, "failed to create dir %s", dir) - } - - // generate the mon secret - monSecret, err := genSecret(context.Executor, dir, "mon.", []string{"--cap", "mon", "'allow *'"}) - if err != nil { - return nil, err - } - - // generate the admin secret if one was not provided at the command line - args := []string{ - "--cap", "mon", "'allow *'", - "--cap", "osd", "'allow *'", - "--cap", "mgr", "'allow *'", - "--cap", "mds", "'allow'"} - adminSecret, err := genSecret(context.Executor, dir, cephclient.AdminUsername, args) - if err != nil { - return nil, err - } - - return &cephclient.ClusterInfo{ - FSID: fsid.String(), - MonitorSecret: monSecret, - Namespace: namespace, - CephCred: cephclient.CephCred{ - Username: cephclient.AdminUsername, - Secret: adminSecret, - }, - }, nil -} - -func genSecret(executor exec.Executor, configDir, name string, args []string) (string, error) { - path := path.Join(configDir, fmt.Sprintf("%s.keyring", name)) - path = strings.Replace(path, "..", ".", 1) - base := []string{ - "--create-keyring", - path, - "--gen-key", - "-n", name, - } - args = append(base, args...) - _, err := executor.ExecuteCommandWithOutput("ceph-authtool", args...) 
- if err != nil { - return "", errors.Wrap(err, "failed to gen secret") - } - - contents, err := ioutil.ReadFile(filepath.Clean(path)) - if err != nil { - return "", errors.Wrap(err, "failed to read secret file") - } - return ExtractKey(string(contents)) -} - -// ExtractKey retrieves mon secret key from the keyring file -func ExtractKey(contents string) (string, error) { - secret := "" - slice := strings.Fields(sys.Grep(string(contents), "key")) - if len(slice) >= 3 { - secret = slice[2] - } - if secret == "" { - return "", errors.New("failed to parse secret") - } - return secret, nil -} - -// PopulateExternalClusterInfo Add validation in the code to fail if the external cluster has no OSDs keep waiting -func PopulateExternalClusterInfo(context *clusterd.Context, namespace string, ownerInfo *k8sutil.OwnerInfo) *cephclient.ClusterInfo { - for { - clusterInfo, _, _, err := LoadClusterInfo(context, namespace) - if err != nil { - logger.Warningf("waiting for the csi connection info of the external cluster. retrying in %s.", externalConnectionRetry.String()) - logger.Debugf("%v", err) - time.Sleep(externalConnectionRetry) - continue - } - logger.Infof("found the cluster info to connect to the external cluster. will use %q to check health and monitor status. mons=%+v", clusterInfo.CephCred.Username, clusterInfo.Monitors) - clusterInfo.OwnerInfo = ownerInfo - return clusterInfo - } -} diff --git a/pkg/operator/ceph/cluster/mon/config_test.go b/pkg/operator/ceph/cluster/mon/config_test.go deleted file mode 100644 index b31bdc859..000000000 --- a/pkg/operator/ceph/cluster/mon/config_test.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestCreateClusterSecrets(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 1) - configDir := "ns" - err := os.MkdirAll(configDir, 0755) - assert.NoError(t, err) - defer os.RemoveAll(configDir) - adminSecret := "AQDkLIBd9vLGJxAAnXsIKPrwvUXAmY+D1g0X1Q==" //nolint:gosec // This is just a var name, not a real secret - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("COMMAND: %s %v", command, args) - if command == "ceph-authtool" && args[0] == "--create-keyring" { - filename := args[1] - assert.NoError(t, ioutil.WriteFile(filename, []byte(fmt.Sprintf("key = %s", adminSecret)), 0600)) - } - return "", nil - }, - } - context := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - } - namespace := "ns" - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - info, maxID, mapping, err := CreateOrLoadClusterInfo(context, namespace, ownerInfo) - assert.NoError(t, err) - assert.Equal(t, -1, maxID) - require.NotNil(t, info) - assert.Equal(t, "client.admin", info.CephCred.Username) - assert.Equal(t, adminSecret, info.CephCred.Secret) - assert.NotEqual(t, "", info.FSID) - assert.NotNil(t, mapping) - - // check for the cluster secret - secret, err := clientset.CoreV1().Secrets(namespace).Get(ctx, "rook-ceph-mon", metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, adminSecret, string(secret.Data["ceph-secret"])) - - // For backward compatibility check that the admin secret can be loaded as previously specified - // Update the secret as if created in an old cluster - delete(secret.Data, cephUserSecretKey) - delete(secret.Data, cephUsernameKey) - secret.Data[adminSecretNameKey] = []byte(adminSecret) - _, err = clientset.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) - assert.NoError(t, err) - - // Check that the cluster info can now be loaded - info, _, _, err = CreateOrLoadClusterInfo(context, namespace, ownerInfo) - assert.NoError(t, err) - assert.Equal(t, "client.admin", info.CephCred.Username) - assert.Equal(t, adminSecret, info.CephCred.Secret) - - // Fail to load the external cluster if the admin placeholder is specified - secret.Data[adminSecretNameKey] = []byte(adminSecretNameKey) - _, err = clientset.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) - assert.NoError(t, err) - _, _, _, err = CreateOrLoadClusterInfo(context, namespace, ownerInfo) - assert.Error(t, err) - - // Load the external cluster with the legacy external creds - secret.Name = OperatorCreds - secret.Data = map[string][]byte{ - "userID": []byte("testid"), - "userKey": []byte("testkey"), - } - _, err = clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - info, _, _, err = CreateOrLoadClusterInfo(context, namespace, ownerInfo) - assert.NoError(t, err) - assert.Equal(t, "testid", info.CephCred.Username) - assert.Equal(t, "testkey", info.CephCred.Secret) -} diff --git a/pkg/operator/ceph/cluster/mon/drain.go b/pkg/operator/ceph/cluster/mon/drain.go deleted file mode 100644 index 
0a31d4187..000000000 --- a/pkg/operator/ceph/cluster/mon/drain.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "context" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/k8sutil" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -const ( - monPDBName = "rook-ceph-mon-pdb" -) - -func (c *Cluster) reconcileMonPDB() error { - if !c.spec.DisruptionManagement.ManagePodBudgets { - //TODO: Delete mon PDB - return nil - } - - monCount := c.spec.Mon.Count - if monCount <= 2 { - logger.Debug("managePodBudgets is set, but mon-count <= 2. Not creating a disruptionbudget for Mons") - return nil - } - - op, err := c.createOrUpdateMonPDB(1) - if err != nil { - return errors.Wrapf(err, "failed to reconcile mon pdb on op %q", op) - } - return nil -} - -func (c *Cluster) createOrUpdateMonPDB(maxUnavailable int32) (controllerutil.OperationResult, error) { - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(c.context.Clientset) - if err != nil { - return controllerutil.OperationResultNone, errors.Wrap(err, "failed to fetch pdb version") - } - objectMeta := metav1.ObjectMeta{ - Name: monPDBName, - Namespace: c.Namespace, - } - selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{k8sutil.AppAttr: AppName}, - } - if usePDBV1Beta1 { - pdb := &policyv1beta1.PodDisruptionBudget{ - ObjectMeta: objectMeta} - - mutateFunc := func() error { - pdb.Spec = policyv1beta1.PodDisruptionBudgetSpec{ - Selector: selector, - MaxUnavailable: &intstr.IntOrString{IntVal: maxUnavailable}, - } - return nil - } - return controllerutil.CreateOrUpdate(context.TODO(), c.context.Client, pdb, mutateFunc) - } - pdb := &policyv1.PodDisruptionBudget{ - ObjectMeta: objectMeta} - - mutateFunc := func() error { - pdb.Spec = policyv1.PodDisruptionBudgetSpec{ - Selector: selector, - MaxUnavailable: &intstr.IntOrString{IntVal: maxUnavailable}, - } - return nil - } - return controllerutil.CreateOrUpdate(context.TODO(), c.context.Client, pdb, mutateFunc) -} - -// blockMonDrain makes MaxUnavailable in mon PDB to 0 to block any voluntary mon drains -func (c *Cluster) blockMonDrain(request types.NamespacedName) error { - if !c.spec.DisruptionManagement.ManagePodBudgets { - return nil - } - logger.Info("prevent voluntary mon drain while failing over") - // change MaxUnavailable mon PDB to 0 - _, err := c.createOrUpdateMonPDB(0) - if err != nil { - return errors.Wrapf(err, "failed to update MaxUnavailable for mon PDB %q", request.Name) - } - return nil -} - -// allowMonDrain updates the MaxUnavailable in mon PDB to 1 to allow voluntary mon drains -func (c *Cluster) allowMonDrain(request types.NamespacedName) error { - if !c.spec.DisruptionManagement.ManagePodBudgets { - return nil - } - logger.Info("allow 
voluntary mon drain after failover") - // change MaxUnavailable mon PDB to 1 - _, err := c.createOrUpdateMonPDB(1) - if err != nil { - return errors.Wrapf(err, "failed to update MaxUnavailable for mon PDB %q", request.Name) - } - return nil -} diff --git a/pkg/operator/ceph/cluster/mon/drain_test.go b/pkg/operator/ceph/cluster/mon/drain_test.go deleted file mode 100644 index f64e1f6e7..000000000 --- a/pkg/operator/ceph/cluster/mon/drain_test.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "context" - "sync" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -const ( - mockNamespace = "test-ns" -) - -func createFakeCluster(t *testing.T, cephClusterObj *cephv1.CephCluster, k8sVersion string) *Cluster { - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - scheme := scheme.Scheme - err := policyv1.AddToScheme(scheme) - assert.NoError(t, err) - err = policyv1beta1.AddToScheme(scheme) - assert.NoError(t, err) - - cl := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects().Build() - clientset := test.New(t, 3) - c := New(&clusterd.Context{Client: cl, Clientset: clientset}, mockNamespace, cephClusterObj.Spec, ownerInfo, &sync.Mutex{}) - test.SetFakeKubernetesVersion(clientset, k8sVersion) - return c -} - -func TestReconcileMonPDB(t *testing.T) { - testCases := []struct { - name string - cephCluster *cephv1.CephCluster - expectedMaxUnAvailable int32 - errorExpected bool - }{ - { - name: "0 mons", - cephCluster: &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "rook", Namespace: mockNamespace}, - Spec: cephv1.ClusterSpec{ - DisruptionManagement: cephv1.DisruptionManagementSpec{ - ManagePodBudgets: true, - }, - }, - }, - expectedMaxUnAvailable: 0, - errorExpected: true, - }, - { - name: "3 mons", - cephCluster: &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "rook", Namespace: mockNamespace}, - Spec: cephv1.ClusterSpec{ - Mon: cephv1.MonSpec{ - Count: 3, - }, - DisruptionManagement: cephv1.DisruptionManagementSpec{ - ManagePodBudgets: true, - }, - }, - }, - expectedMaxUnAvailable: 1, - errorExpected: false, - }, - { - name: "5 mons", - cephCluster: &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "rook", Namespace: mockNamespace}, - Spec: cephv1.ClusterSpec{ - Mon: cephv1.MonSpec{ - Count: 5, - }, - DisruptionManagement: cephv1.DisruptionManagementSpec{ - ManagePodBudgets: true, - }, - }, - }, - expectedMaxUnAvailable: 1, - errorExpected: false, - }, - } - - for _, tc := range 
testCases { - // check for PDBV1Beta1 version - c := createFakeCluster(t, tc.cephCluster, "v1.20.0") - err := c.reconcileMonPDB() - assert.NoError(t, err) - existingPDBV1Beta1 := &policyv1beta1.PodDisruptionBudget{} - err = c.context.Client.Get(context.TODO(), types.NamespacedName{Name: monPDBName, Namespace: mockNamespace}, existingPDBV1Beta1) - if tc.errorExpected { - assert.Error(t, err) - continue - } - assert.NoError(t, err) - assert.Equalf(t, tc.expectedMaxUnAvailable, int32(existingPDBV1Beta1.Spec.MaxUnavailable.IntValue()), "[%s]: incorrect minAvailable count in pdb", tc.name) - - // check for PDBV1 version - c = createFakeCluster(t, tc.cephCluster, "v1.21.0") - err = c.reconcileMonPDB() - assert.NoError(t, err) - existingPDBV1 := &policyv1.PodDisruptionBudget{} - err = c.context.Client.Get(context.TODO(), types.NamespacedName{Name: monPDBName, Namespace: mockNamespace}, existingPDBV1) - if tc.errorExpected { - assert.Error(t, err) - continue - } - assert.NoError(t, err) - assert.Equalf(t, tc.expectedMaxUnAvailable, int32(existingPDBV1.Spec.MaxUnavailable.IntValue()), "[%s]: incorrect minAvailable count in pdb", tc.name) - - // reconcile mon PDB again to test update - err = c.reconcileMonPDB() - assert.NoError(t, err) - } -} - -func TestAllowMonDrain(t *testing.T) { - fakeNamespaceName := types.NamespacedName{Namespace: mockNamespace, Name: monPDBName} - // check for PDBV1 version - c := createFakeCluster(t, &cephv1.CephCluster{ - Spec: cephv1.ClusterSpec{ - DisruptionManagement: cephv1.DisruptionManagementSpec{ - ManagePodBudgets: true, - }, - }, - }, "v1.21.0") - t.Run("allow mon drain for K8s version v1.21.0", func(t *testing.T) { - // change MaxUnavailable mon PDB to 1 - err := c.allowMonDrain(fakeNamespaceName) - assert.NoError(t, err) - existingPDBV1 := &policyv1.PodDisruptionBudget{} - err = c.context.Client.Get(context.TODO(), fakeNamespaceName, existingPDBV1) - assert.NoError(t, err) - assert.Equal(t, 1, int(existingPDBV1.Spec.MaxUnavailable.IntValue())) - }) - // check for PDBV1Beta1 version - c = createFakeCluster(t, &cephv1.CephCluster{ - Spec: cephv1.ClusterSpec{ - DisruptionManagement: cephv1.DisruptionManagementSpec{ - ManagePodBudgets: true, - }, - }, - }, "v1.20.0") - t.Run("allow mon drain for K8s version v1.20.0", func(t *testing.T) { - // change MaxUnavailable mon PDB to 1 - err := c.allowMonDrain(fakeNamespaceName) - assert.NoError(t, err) - existingPDBV1Beta1 := &policyv1beta1.PodDisruptionBudget{} - err = c.context.Client.Get(context.TODO(), fakeNamespaceName, existingPDBV1Beta1) - assert.NoError(t, err) - assert.Equal(t, 1, int(existingPDBV1Beta1.Spec.MaxUnavailable.IntValue())) - }) -} - -func TestBlockMonDrain(t *testing.T) { - fakeNamespaceName := types.NamespacedName{Namespace: mockNamespace, Name: monPDBName} - // check for PDBV1 version - c := createFakeCluster(t, &cephv1.CephCluster{ - Spec: cephv1.ClusterSpec{ - DisruptionManagement: cephv1.DisruptionManagementSpec{ - ManagePodBudgets: true, - }, - }, - }, "v1.21.0") - t.Run("block mon drain for K8s version v1.21.0", func(t *testing.T) { - // change MaxUnavailable mon PDB to 0 - err := c.blockMonDrain(fakeNamespaceName) - assert.NoError(t, err) - existingPDBV1 := &policyv1.PodDisruptionBudget{} - err = c.context.Client.Get(context.TODO(), fakeNamespaceName, existingPDBV1) - assert.NoError(t, err) - assert.Equal(t, 0, int(existingPDBV1.Spec.MaxUnavailable.IntValue())) - }) - // check for PDBV1Beta1 version - c = createFakeCluster(t, &cephv1.CephCluster{ - Spec: cephv1.ClusterSpec{ - 
DisruptionManagement: cephv1.DisruptionManagementSpec{ - ManagePodBudgets: true, - }, - }, - }, "v1.20.0") - t.Run("block mon drain for K8s version v1.20.0", func(t *testing.T) { - // change MaxUnavailable mon PDB to 0 - err := c.blockMonDrain(fakeNamespaceName) - assert.NoError(t, err) - existingPDBV1Beta1 := &policyv1beta1.PodDisruptionBudget{} - err = c.context.Client.Get(context.TODO(), fakeNamespaceName, existingPDBV1Beta1) - assert.NoError(t, err) - assert.Equal(t, 0, int(existingPDBV1Beta1.Spec.MaxUnavailable.IntValue())) - }) -} diff --git a/pkg/operator/ceph/cluster/mon/endpoint.go b/pkg/operator/ceph/cluster/mon/endpoint.go deleted file mode 100644 index c9cdfdffa..000000000 --- a/pkg/operator/ceph/cluster/mon/endpoint.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "fmt" - "strings" - - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" -) - -// FlattenMonEndpoints returns a comma-delimited string of all mons and endpoints in the form -// = -func FlattenMonEndpoints(mons map[string]*cephclient.MonInfo) string { - endpoints := []string{} - for _, m := range mons { - endpoints = append(endpoints, fmt.Sprintf("%s=%s", m.Name, m.Endpoint)) - } - return strings.Join(endpoints, ",") -} - -// ParseMonEndpoints parses a flattened representation of mons and endpoints in the form -// = and returns a list of Ceph mon configs. -func ParseMonEndpoints(input string) map[string]*cephclient.MonInfo { - logger.Infof("parsing mon endpoints: %s", input) - mons := map[string]*cephclient.MonInfo{} - rawMons := strings.Split(input, ",") - for _, rawMon := range rawMons { - parts := strings.Split(rawMon, "=") - if len(parts) != 2 { - logger.Warningf("ignoring invalid monitor %s", rawMon) - continue - } - mons[parts[0]] = &cephclient.MonInfo{Name: parts[0], Endpoint: parts[1]} - } - return mons -} diff --git a/pkg/operator/ceph/cluster/mon/endpoint_test.go b/pkg/operator/ceph/cluster/mon/endpoint_test.go deleted file mode 100644 index 8612755d5..000000000 --- a/pkg/operator/ceph/cluster/mon/endpoint_test.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "testing" - - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/stretchr/testify/assert" -) - -func TestMonFlattening(t *testing.T) { - - // single endpoint - mons := map[string]*cephclient.MonInfo{ - "foo": {Name: "foo", Endpoint: "1.2.3.4:5000"}, - } - flattened := FlattenMonEndpoints(mons) - assert.Equal(t, "foo=1.2.3.4:5000", flattened) - parsed := ParseMonEndpoints(flattened) - assert.Equal(t, 1, len(parsed)) - assert.Equal(t, "foo", parsed["foo"].Name) - assert.Equal(t, "1.2.3.4:5000", parsed["foo"].Endpoint) - - // multiple endpoints - mons["bar"] = &cephclient.MonInfo{Name: "bar", Endpoint: "2.3.4.5:6000"} - flattened = FlattenMonEndpoints(mons) - parsed = ParseMonEndpoints(flattened) - assert.Equal(t, 2, len(parsed)) - assert.Equal(t, "foo", parsed["foo"].Name) - assert.Equal(t, "1.2.3.4:5000", parsed["foo"].Endpoint) - assert.Equal(t, "bar", parsed["bar"].Name) - assert.Equal(t, "2.3.4.5:6000", parsed["bar"].Endpoint) -} diff --git a/pkg/operator/ceph/cluster/mon/env.go b/pkg/operator/ceph/cluster/mon/env.go deleted file mode 100644 index ff4fa230b..000000000 --- a/pkg/operator/ceph/cluster/mon/env.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" -) - -// PodNamespaceEnvVar is the cluster namespace environment var -func PodNamespaceEnvVar(namespace string) v1.EnvVar { - return v1.EnvVar{Name: k8sutil.PodNamespaceEnvVar, Value: namespace} -} - -// EndpointEnvVar is the mon endpoint environment var -func EndpointEnvVar() v1.EnvVar { - ref := &v1.ConfigMapKeySelector{LocalObjectReference: v1.LocalObjectReference{Name: EndpointConfigMapName}, Key: EndpointDataKey} - return v1.EnvVar{Name: "ROOK_MON_ENDPOINTS", ValueFrom: &v1.EnvVarSource{ConfigMapKeyRef: ref}} -} - -// SecretEnvVar is the mon secret environment var -func SecretEnvVar() v1.EnvVar { - ref := &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{Name: AppName}, Key: monSecretNameKey} - return v1.EnvVar{Name: "ROOK_MON_SECRET", ValueFrom: &v1.EnvVarSource{SecretKeyRef: ref}} -} - -// CephUsernameEnvVar is the ceph username environment var -func CephUsernameEnvVar() v1.EnvVar { - ref := &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{Name: AppName}, Key: cephUsernameKey} - return v1.EnvVar{Name: "ROOK_CEPH_USERNAME", ValueFrom: &v1.EnvVarSource{SecretKeyRef: ref}} -} - -// CephSecretEnvVar is the ceph secret environment var -func CephSecretEnvVar() v1.EnvVar { - ref := &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{Name: AppName}, Key: cephUserSecretKey} - return v1.EnvVar{Name: "ROOK_CEPH_SECRET", ValueFrom: &v1.EnvVarSource{SecretKeyRef: ref}} -} diff --git a/pkg/operator/ceph/cluster/mon/health.go b/pkg/operator/ceph/cluster/mon/health.go deleted file mode 100644 index 0afdba352..000000000 --- a/pkg/operator/ceph/cluster/mon/health.go +++ /dev/null @@ -1,702 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "context" - "fmt" - "os" - "strings" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephutil "github.com/rook/rook/pkg/daemon/ceph/util" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -var ( - // HealthCheckInterval is the interval to check if the mons are in quorum - HealthCheckInterval = 45 * time.Second - // MonOutTimeout is the duration to wait before removing/failover to a new mon pod - MonOutTimeout = 10 * time.Minute - - retriesBeforeNodeDrainFailover = 1 - timeZero = time.Duration(0) - // Check whether mons are on the same node once per operator restart since it's a rare scheduling condition - needToCheckMonsOnSameNode = true -) - -// HealthChecker aggregates the mon/cluster info needed to check the health of the monitors -type HealthChecker struct { - monCluster *Cluster - interval time.Duration -} - -func updateMonTimeout(monCluster *Cluster) { - // If the env was passed by the operator config, use that value - // This is an old behavior where we maintain backward compatibility - monTimeoutEnv := os.Getenv("ROOK_MON_OUT_TIMEOUT") - if monTimeoutEnv != "" { - parsedInterval, err := time.ParseDuration(monTimeoutEnv) - // We ignore the error here since the default is 10min and it's unlikely to be a problem - if err == nil { - MonOutTimeout = parsedInterval - } - // No env var, let's use the CR value if any - } else { - monCRDTimeoutSetting := monCluster.spec.HealthCheck.DaemonHealth.Monitor.Timeout - if monCRDTimeoutSetting != "" { - if monTimeout, err := time.ParseDuration(monCRDTimeoutSetting); err == nil { - if monTimeout == timeZero { - logger.Warning("monitor failover is disabled") - } - MonOutTimeout = monTimeout - } - } - } - // A third case is when the CRD is not set, in which case we use the default from MonOutTimeout -} - -func updateMonInterval(monCluster *Cluster, h *HealthChecker) { - // If the env was passed by the operator config, use that value - // This is an old behavior where we maintain backward compatibility - healthCheckIntervalEnv := os.Getenv("ROOK_MON_HEALTHCHECK_INTERVAL") - if healthCheckIntervalEnv != "" { - parsedInterval, err := time.ParseDuration(healthCheckIntervalEnv) - // We ignore the error here since the default is 45s and it's unlikely to be a problem - if err == nil { - h.interval = parsedInterval - } - // No env var, let's use the CR value if any - } else { - checkInterval := monCluster.spec.HealthCheck.DaemonHealth.Monitor.Interval - // allow overriding the check interval - if checkInterval != nil { - logger.Debugf("ceph mon status in namespace %q check interval %q", monCluster.Namespace, checkInterval.Duration.String()) - h.interval = checkInterval.Duration - } - } - // A third case is when the CRD is not set, in which case we use the default from HealthCheckInterval -} - -// NewHealthChecker creates a new HealthChecker object -func NewHealthChecker(monCluster *Cluster) *HealthChecker { - h := &HealthChecker{ - monCluster: monCluster, - interval: HealthCheckInterval, - } - return h -} - -// Check periodically checks the health of the monitors -func (hc *HealthChecker) Check(stopCh chan struct{}) { - for { - // Update Mon Timeout with CR details - updateMonTimeout(hc.monCluster) - // Update Mon Interval with CR details - 
updateMonInterval(hc.monCluster, hc) - select { - case <-stopCh: - logger.Infof("stopping monitoring of mons in namespace %q", hc.monCluster.Namespace) - return - - case <-time.After(hc.interval): - logger.Debugf("checking health of mons") - err := hc.monCluster.checkHealth() - if err != nil { - logger.Warningf("failed to check mon health. %v", err) - } - } - } -} - -func (c *Cluster) checkHealth() error { - c.acquireOrchestrationLock() - defer c.releaseOrchestrationLock() - - // If cluster details are not initialized - if !c.ClusterInfo.IsInitialized(true) { - return errors.New("skipping mon health check since cluster details are not initialized") - } - - // If the cluster is converged and no mons were specified - if c.spec.Mon.Count == 0 && !c.spec.External.Enable { - return errors.New("skipping mon health check since there are no monitors") - } - - logger.Debugf("Checking health for mons in cluster %q", c.ClusterInfo.Namespace) - - // For an external connection we use a special function to get the status - if c.spec.External.Enable { - quorumStatus, err := cephclient.GetMonQuorumStatus(c.context, c.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get external mon quorum status") - } - - err = c.handleExternalMonStatus(quorumStatus) - if err != nil { - return errors.Wrap(err, "failed to get external mon quorum status") - } - - // handle active manager - err = controller.ConfigureExternalMetricsEndpoint(c.context, c.spec.Monitoring, c.ClusterInfo, c.ownerInfo) - if err != nil { - return errors.Wrap(err, "failed to configure external metrics endpoint") - } - - return nil - } - - // connect to the mons - // get the status and check for quorum - quorumStatus, err := cephclient.GetMonQuorumStatus(c.context, c.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get mon quorum status") - } - logger.Debugf("Mon quorum status: %+v", quorumStatus) - - // Use a local mon count in case the user updates the crd in another goroutine. - // We need to complete a health check with a consistent value. - desiredMonCount := c.spec.Mon.Count - logger.Debugf("targeting the mon count %d", desiredMonCount) - - // Source of truth of which mons should exist is our *clusterInfo* - monsNotFound := map[string]interface{}{} - for _, mon := range c.ClusterInfo.Monitors { - monsNotFound[mon.Name] = struct{}{} - } - - // first handle mons that are not in quorum but in the ceph mon map - // failover the unhealthy mons - allMonsInQuorum := true - for _, mon := range quorumStatus.MonMap.Mons { - inQuorum := monInQuorum(mon, quorumStatus.Quorum) - // if the mon is in quorum remove it from our check for "existence" - // else see below condition - if _, ok := monsNotFound[mon.Name]; ok { - delete(monsNotFound, mon.Name) - } else { - // when the mon isn't in the clusterInfo, but is in quorum and there are - // enough mons, remove it else remove it on the next run - if inQuorum && len(quorumStatus.MonMap.Mons) > desiredMonCount { - logger.Warningf("mon %q not in source of truth but in quorum, removing", mon.Name) - if err := c.removeMon(mon.Name); err != nil { - logger.Warningf("failed to remove mon %q. 
%v", mon.Name, err) - } - // only remove one extra mon per health check - return nil - } - logger.Warningf( - "mon %q not in source of truth and not in quorum, not enough mons to remove now (wanted: %d, current: %d)", - mon.Name, - desiredMonCount, - len(quorumStatus.MonMap.Mons), - ) - } - - if inQuorum { - logger.Debugf("mon %q found in quorum", mon.Name) - // delete the "timeout" for a mon if the pod is in quorum again - if _, ok := c.monTimeoutList[mon.Name]; ok { - delete(c.monTimeoutList, mon.Name) - logger.Infof("mon %q is back in quorum, removed from mon out timeout list", mon.Name) - } - continue - } - - logger.Debugf("mon %q NOT found in quorum. Mon quorum status: %+v", mon.Name, quorumStatus) - - // if the time out is set to 0 this indicate that we don't want to trigger mon failover - if MonOutTimeout == timeZero { - logger.Warningf("mon %q NOT found in quorum and health timeout is 0, mon will never fail over", mon.Name) - return nil - } - - allMonsInQuorum = false - - // If not yet set, add the current time, for the timeout - // calculation, to the list - if _, ok := c.monTimeoutList[mon.Name]; !ok { - c.monTimeoutList[mon.Name] = time.Now() - } - - // when the timeout for the mon has been reached, continue to the - // normal failover mon pod part of the code - if time.Since(c.monTimeoutList[mon.Name]) <= MonOutTimeout { - timeToFailover := int(MonOutTimeout.Seconds() - time.Since(c.monTimeoutList[mon.Name]).Seconds()) - logger.Warningf("mon %q not found in quorum, waiting for timeout (%d seconds left) before failover", mon.Name, timeToFailover) - continue - } - - // retry only once before the mon failover if the mon pod is not scheduled - monLabelSelector := fmt.Sprintf("%s=%s,%s=%s", k8sutil.AppAttr, AppName, controller.DaemonIDLabel, mon.Name) - isScheduled, err := k8sutil.IsPodScheduled(c.context.Clientset, c.Namespace, monLabelSelector) - if err != nil { - logger.Warningf("failed to check if mon %q is assigned to a node, continuing with mon failover. %v", mon.Name, err) - } else if !isScheduled && retriesBeforeNodeDrainFailover > 0 { - logger.Warningf("mon %q NOT found in quorum after timeout. Mon pod is not scheduled. Retrying with a timeout of %.2f seconds before failover", mon.Name, MonOutTimeout.Seconds()) - delete(c.monTimeoutList, mon.Name) - retriesBeforeNodeDrainFailover = retriesBeforeNodeDrainFailover - 1 - return nil - } - retriesBeforeNodeDrainFailover = 1 - - logger.Warningf("mon %q NOT found in quorum and timeout exceeded, mon will be failed over", mon.Name) - if !c.failMon(len(quorumStatus.MonMap.Mons), desiredMonCount, mon.Name) { - // The failover was skipped, so we continue to see if another mon needs to failover - continue - } - - // only deal with one unhealthy mon per health check - return nil - } - - // after all unhealthy mons have been removed or failed over - // handle all mons that haven't been in the Ceph mon map - for mon := range monsNotFound { - logger.Warningf("mon %s NOT found in ceph mon map, failover", mon) - c.failMon(len(c.ClusterInfo.Monitors), desiredMonCount, mon) - // only deal with one "not found in ceph mon map" mon per health check - return nil - } - - // create/start new mons when there are fewer mons than the desired count in the CRD - if len(quorumStatus.MonMap.Mons) < desiredMonCount { - logger.Infof("adding mons. 
currently %d mons are in quorum and the desired count is %d.", len(quorumStatus.MonMap.Mons), desiredMonCount) - return c.startMons(desiredMonCount) - } - - // remove extra mons if the desired count has decreased in the CRD and all the mons are currently healthy - if allMonsInQuorum && len(quorumStatus.MonMap.Mons) > desiredMonCount { - if desiredMonCount < 2 && len(quorumStatus.MonMap.Mons) == 2 { - logger.Warningf("cannot reduce mon quorum size from 2 to 1") - } else { - logger.Infof("removing an extra mon. currently %d are in quorum and only %d are desired", len(quorumStatus.MonMap.Mons), desiredMonCount) - return c.removeMon(quorumStatus.MonMap.Mons[0].Name) - } - } - - if allMonsInQuorum && len(quorumStatus.MonMap.Mons) == desiredMonCount { - // remove any pending/not needed mon canary deployment if everything is ok - logger.Debug("mon cluster is healthy, removing any existing canary deployment") - c.removeCanaryDeployments() - - // Check whether two healthy mons are on the same node when they should not be. - // This should be a rare event to find them on the same node, so we just need to check - // once per operator restart. - if needToCheckMonsOnSameNode { - needToCheckMonsOnSameNode = false - return c.evictMonIfMultipleOnSameNode() - } - } - - return nil -} - -// failMon compares the monCount against desiredMonCount -// Returns whether the failover request was attempted. If false, -// the operator should check for other mons to failover. -func (c *Cluster) failMon(monCount, desiredMonCount int, name string) bool { - if monCount > desiredMonCount { - // no need to create a new mon since we have an extra - if err := c.removeMon(name); err != nil { - logger.Errorf("failed to remove mon %q. %v", name, err) - } - } else { - if c.spec.IsStretchCluster() && name == c.arbiterMon { - // Ceph does not currently support updating the arbiter mon - // or else the mons in the two datacenters will not be aware anymore - // of the arbiter mon. Thus, disabling failover until the arbiter - // mon can be updated in ceph. - logger.Warningf("refusing to failover arbiter mon %q on a stretched cluster", name) - return false - } - - // prevent any voluntary mon drain while failing over - if err := c.blockMonDrain(types.NamespacedName{Name: monPDBName, Namespace: c.Namespace}); err != nil { - logger.Errorf("failed to block mon drain. %v", err) - } - - // bring up a new mon to replace the unhealthy mon - if err := c.failoverMon(name); err != nil { - logger.Errorf("failed to failover mon %q. %v", name, err) - } - - // allow any voluntary mon drain after failover - if err := c.allowMonDrain(types.NamespacedName{Name: monPDBName, Namespace: c.Namespace}); err != nil { - logger.Errorf("failed to allow mon drain. %v", err) - } - } - return true -} - -func (c *Cluster) removeOrphanMonResources() { - ctx := context.TODO() - if c.spec.Mon.VolumeClaimTemplate == nil { - logger.Debug("skipping check for orphaned mon pvcs since using the host path") - return - } - - logger.Info("checking for orphaned mon resources") - - opts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", k8sutil.AppAttr, AppName)} - pvcs, err := c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).List(ctx, opts) - if err != nil { - logger.Infof("failed to check for orphaned mon pvcs. 
%v", err) - return - } - - for _, pvc := range pvcs.Items { - logger.Debugf("checking if pvc %q is orphaned", pvc.Name) - - _, err := c.context.Clientset.AppsV1().Deployments(c.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) - if err == nil { - logger.Debugf("skipping pvc removal since the mon daemon %q still requires it", pvc.Name) - continue - } - if !kerrors.IsNotFound(err) { - logger.Infof("skipping pvc removal since the mon daemon %q might still require it. %v", pvc.Name, err) - continue - } - - logger.Infof("removing pvc %q since it is no longer needed for the mon daemon", pvc.Name) - var gracePeriod int64 // delete immediately - propagation := metav1.DeletePropagationForeground - options := &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod, PropagationPolicy: &propagation} - err = c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).Delete(ctx, pvc.Name, *options) - if err != nil { - logger.Warningf("failed to delete orphaned monitor pvc %q. %v", pvc.Name, err) - } - } -} - -func (c *Cluster) updateMonDeploymentReplica(name string, enabled bool) error { - ctx := context.TODO() - // get the existing deployment - d, err := c.context.Clientset.AppsV1().Deployments(c.Namespace).Get(ctx, resourceName(name), metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to get mon %q", name) - } - - // set the desired number of replicas - var desiredReplicas int32 - if enabled { - desiredReplicas = 1 - } - originalReplicas := *d.Spec.Replicas - d.Spec.Replicas = &desiredReplicas - - // update the deployment - logger.Infof("scaling the mon %q deployment to replica %d", name, desiredReplicas) - _, err = c.context.Clientset.AppsV1().Deployments(c.Namespace).Update(ctx, d, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to update mon %q replicas from %d to %d", name, originalReplicas, desiredReplicas) - } - return nil -} - -func (c *Cluster) failoverMon(name string) error { - logger.Infof("Failing over monitor %q", name) - - // Scale down the failed mon to allow a new one to start - if err := c.updateMonDeploymentReplica(name, false); err != nil { - // attempt to continue with the failover even if the bad mon could not be stopped - logger.Warningf("failed to stop mon %q for failover. %v", name, err) - } - newMonSucceeded := false - defer func() { - if newMonSucceeded { - // do nothing if the new mon was started successfully, the deployment will anyway be deleted - return - } - if err := c.updateMonDeploymentReplica(name, true); err != nil { - // attempt to continue even if the bad mon could not be restarted - logger.Warningf("failed to restart failed mon %q after new mon wouldn't start. 
%v", name, err) - } - }() - - // remove the failed mon from a local list of the existing mons for finding a stretch zone - existingMons := c.clusterInfoToMonConfig(name) - zone, err := c.findAvailableZoneIfStretched(existingMons) - if err != nil { - return errors.Wrap(err, "failed to find available stretch zone") - } - - // Start a new monitor - m := c.newMonConfig(c.maxMonID+1, zone) - logger.Infof("starting new mon: %+v", m) - - mConf := []*monConfig{m} - - // Assign the pod to a node - if err := c.assignMons(mConf); err != nil { - return errors.Wrap(err, "failed to place new mon on a node") - } - - if c.spec.Network.IsHost() { - schedule, ok := c.mapping.Schedule[m.DaemonName] - if !ok { - return errors.Errorf("mon %s doesn't exist in assignment map", m.DaemonName) - } - m.PublicIP = schedule.Address - } else { - // Create the service endpoint - serviceIP, err := c.createService(m) - if err != nil { - return errors.Wrap(err, "failed to create mon service") - } - m.PublicIP = serviceIP - } - c.ClusterInfo.Monitors[m.DaemonName] = cephclient.NewMonInfo(m.DaemonName, m.PublicIP, m.Port) - - // Start the deployment - if err := c.startDeployments(mConf, true); err != nil { - return errors.Wrapf(err, "failed to start new mon %s", m.DaemonName) - } - - // Assign to a zone if a stretch cluster - if c.spec.IsStretchCluster() { - if name == c.arbiterMon { - // Update the arbiter mon for the stretch cluster if it changed - if err := c.ConfigureArbiter(); err != nil { - return errors.Wrap(err, "failed to configure stretch arbiter") - } - } - } - - // Only increment the max mon id if the new pod started successfully - c.maxMonID++ - newMonSucceeded = true - - return c.removeMon(name) -} - -// make a best effort to remove the mon and all its resources -func (c *Cluster) removeMon(daemonName string) error { - ctx := context.TODO() - logger.Infof("ensuring removal of unhealthy monitor %s", daemonName) - - resourceName := resourceName(daemonName) - - // Remove the mon pod if it is still there - var gracePeriod int64 - propagation := metav1.DeletePropagationForeground - options := &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod, PropagationPolicy: &propagation} - if err := c.context.Clientset.AppsV1().Deployments(c.Namespace).Delete(ctx, resourceName, *options); err != nil { - if kerrors.IsNotFound(err) { - logger.Infof("dead mon %s was already gone", resourceName) - } else { - logger.Errorf("failed to remove dead mon deployment %q. %v", resourceName, err) - } - } - - // Remove the bad monitor from quorum - if err := c.removeMonitorFromQuorum(daemonName); err != nil { - logger.Errorf("failed to remove mon %q from quorum. %v", daemonName, err) - } - delete(c.ClusterInfo.Monitors, daemonName) - - delete(c.mapping.Schedule, daemonName) - - // Remove the service endpoint - if err := c.context.Clientset.CoreV1().Services(c.Namespace).Delete(ctx, resourceName, *options); err != nil { - if kerrors.IsNotFound(err) { - logger.Infof("dead mon service %s was already gone", resourceName) - } else { - logger.Errorf("failed to remove dead mon service %q. %v", resourceName, err) - } - } - - // Remove the PVC backing the mon if it existed - if err := c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).Delete(ctx, resourceName, metav1.DeleteOptions{}); err != nil { - if kerrors.IsNotFound(err) { - logger.Infof("mon pvc did not exist %q", resourceName) - } else { - logger.Errorf("failed to remove dead mon pvc %q. 
%v", resourceName, err) - } - } - - if err := c.saveMonConfig(); err != nil { - return errors.Wrapf(err, "failed to save mon config after failing over mon %s", daemonName) - } - - // Update cluster-wide RBD bootstrap peer token since Monitors have changed - _, err := controller.CreateBootstrapPeerSecret(c.context, c.ClusterInfo, &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: c.ClusterInfo.NamespacedName().Name, Namespace: c.Namespace}}, c.ownerInfo) - if err != nil { - return errors.Wrap(err, "failed to update cluster rbd bootstrap peer token") - } - - return nil -} - -func (c *Cluster) removeMonitorFromQuorum(name string) error { - logger.Debugf("removing monitor %s", name) - args := []string{"mon", "remove", name} - if _, err := cephclient.NewCephCommand(c.context, c.ClusterInfo, args).Run(); err != nil { - return errors.Wrapf(err, "mon %s remove failed", name) - } - - logger.Infof("removed monitor %s", name) - return nil -} - -func (c *Cluster) handleExternalMonStatus(status cephclient.MonStatusResponse) error { - // We don't need to validate Ceph version if no image is present - if c.spec.CephVersion.Image != "" { - _, err := controller.ValidateCephVersionsBetweenLocalAndExternalClusters(c.context, c.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to validate external ceph version") - } - } - - changed, err := c.addOrRemoveExternalMonitor(status) - if err != nil { - return errors.Wrap(err, "failed to add or remove external mon") - } - - // let's save the monitor's config if anything happened - if changed { - if err := c.saveMonConfig(); err != nil { - return errors.Wrap(err, "failed to save mon config after adding/removing external mon") - } - } - - return nil -} - -func (c *Cluster) addOrRemoveExternalMonitor(status cephclient.MonStatusResponse) (bool, error) { - var changed bool - oldClusterInfoMonitors := map[string]*cephclient.MonInfo{} - // clearing the content of clusterinfo monitors - // and populate oldClusterInfoMonitors with monitors from clusterinfo - // later c.ClusterInfo.Monitors get populated again - for monName, mon := range c.ClusterInfo.Monitors { - oldClusterInfoMonitors[mon.Name] = mon - delete(c.ClusterInfo.Monitors, monName) - } - logger.Debugf("ClusterInfo is now Empty, refilling it from status.MonMap.Mons") - - monCount := len(status.MonMap.Mons) - if monCount%2 == 0 { - logger.Warningf("external cluster mon count is even (%d), should be uneven, continuing.", monCount) - } - - if monCount == 1 { - logger.Warning("external cluster mon count is 1, consider adding new monitors.") - } - - // Iterate over the mons first and compare it with ClusterInfo - for _, mon := range status.MonMap.Mons { - inQuorum := monInQuorum(mon, status.Quorum) - // if the mon was not in clusterInfo - if _, ok := oldClusterInfoMonitors[mon.Name]; !ok { - // If the mon is part of the quorum - if inQuorum { - // let's add it to ClusterInfo - // FYI mon.PublicAddr is "10.97.171.131:6789/0" - // so we need to remove '/0' - endpointSlash := strings.Split(mon.PublicAddr, "/") - endpoint := endpointSlash[0] - - // find IP and Port of that Mon - monIP := cephutil.GetIPFromEndpoint(endpoint) - monPort := cephutil.GetPortFromEndpoint(endpoint) - logger.Infof("new external mon %q found: %s, adding it", mon.Name, endpoint) - c.ClusterInfo.Monitors[mon.Name] = cephclient.NewMonInfo(mon.Name, monIP, monPort) - } else { - logger.Debugf("mon %q is not in quorum and not in ClusterInfo", mon.Name) - } - changed = true - } else { - // mon is in ClusterInfo - logger.Debugf("mon %q is 
in ClusterInfo, let's test if it's in quorum", mon.Name) - if !inQuorum { - // this mon was in clusterInfo but is not part of the quorum anymore - // thus don't add it again to ClusterInfo - logger.Infof("monitor %q is not part of the external cluster monitor quorum, removing it", mon.Name) - changed = true - } else { - // this mon was in clusterInfo and is still in the quorum - // add it again - c.ClusterInfo.Monitors[mon.Name] = oldClusterInfoMonitors[mon.Name] - logger.Debugf("everything is fine mon %q in the clusterInfo and its quorum status is %v", mon.Name, inQuorum) - } - } - } - // compare old clusterInfo with new ClusterInfo - // if the lengths differ -> they are different - // then check if all elements are the same - if len(oldClusterInfoMonitors) != len(c.ClusterInfo.Monitors) { - changed = true - } else { - for _, mon := range c.ClusterInfo.Monitors { - if old, ok := oldClusterInfoMonitors[mon.Name]; !ok || *old != *mon { - changed = true - } - } - } - - logger.Debugf("ClusterInfo.Monitors is %+v", c.ClusterInfo.Monitors) - return changed, nil -} - -func (c *Cluster) evictMonIfMultipleOnSameNode() error { - if c.spec.Mon.AllowMultiplePerNode { - logger.Debug("skipping check for multiple mons on same node since multiple mons are allowed") - return nil - } - - logger.Info("checking if multiple mons are on the same node") - - // Get all the mon pods - label := fmt.Sprintf("app=%s", AppName) - pods, err := c.context.Clientset.CoreV1().Pods(c.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: label}) - if err != nil { - return errors.Wrap(err, "failed to list mon pods") - } - - nodesToMons := map[string]string{} - for _, pod := range pods.Items { - logger.Debugf("analyzing mon pod %q on node %q", pod.Name, pod.Spec.NodeName) - if _, ok := pod.Labels["mon_canary"]; ok { - logger.Debugf("skipping mon canary pod %q", pod.Name) - continue - } - if pod.Spec.NodeName == "" { - logger.Warningf("mon %q is not assigned to a node", pod.Name) - continue - } - monName := pod.Labels["mon"] - previousMonName, ok := nodesToMons[pod.Spec.NodeName] - if !ok { - // remember this node is taken by this mon - nodesToMons[pod.Spec.NodeName] = monName - continue - } - - logger.Warningf("Both mons %q and %q are on node %q. Evicting mon %q", monName, previousMonName, pod.Spec.NodeName, monName) - return c.failoverMon(monName) - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/mon/health_test.go b/pkg/operator/ceph/cluster/mon/health_test.go deleted file mode 100644 index ac67b4cc9..000000000 --- a/pkg/operator/ceph/cluster/mon/health_test.go +++ /dev/null @@ -1,522 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "reflect" - "sync" - "testing" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - "github.com/rook/rook/pkg/operator/ceph/config" - testopk8s "github.com/rook/rook/pkg/operator/k8sutil/test" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tevino/abool" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -func TestCheckHealth(t *testing.T) { - ctx := context.TODO() - var deploymentsUpdated *[]*apps.Deployment - updateDeploymentAndWait, deploymentsUpdated = testopk8s.UpdateDeploymentAndWaitStub() - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("executing command: %s %+v", command, args) - if args[0] == "auth" && args[1] == "get-or-create-key" { - return "{\"key\":\"mysecurekey\"}", nil - } - return clienttest.MonInQuorumResponse(), nil - }, - } - clientset := test.New(t, 1) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - context := &clusterd.Context{ - Clientset: clientset, - ConfigDir: configDir, - Executor: executor, - RequestCancelOrchestration: abool.New(), - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New(context, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) - // clusterInfo is nil so we return err - err := c.checkHealth() - assert.NotNil(t, err) - - setCommonMonProperties(c, 1, cephv1.MonSpec{Count: 0, AllowMultiplePerNode: true}, "myversion") - // mon count is 0 so we return err - err = c.checkHealth() - assert.NotNil(t, err) - - c.spec.Mon.Count = 3 - logger.Infof("initial mons: %v", c.ClusterInfo.Monitors) - c.waitForStart = false - defer os.RemoveAll(c.context.ConfigDir) - - c.mapping.Schedule["f"] = &MonScheduleInfo{ - Name: "node0", - Address: "", - } - c.maxMonID = 4 - - // mock out the scheduler to return node0 - waitForMonitorScheduling = func(c *Cluster, d *apps.Deployment) (SchedulingResult, error) { - node, _ := clientset.CoreV1().Nodes().Get(ctx, "node0", metav1.GetOptions{}) - return SchedulingResult{Node: node}, nil - } - - err = c.checkHealth() - assert.Nil(t, err) - logger.Infof("mons after checkHealth: %v", c.ClusterInfo.Monitors) - assert.ElementsMatch(t, []string{"rook-ceph-mon-a", "rook-ceph-mon-f"}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - err = c.failoverMon("f") - assert.Nil(t, err) - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - newMons := []string{ - "g", - } - for _, monName := range newMons { - _, ok := c.ClusterInfo.Monitors[monName] - assert.True(t, ok, fmt.Sprintf("mon %s not found in monitor list. 
%v", monName, c.ClusterInfo.Monitors)) - } - - deployments, err := clientset.AppsV1().Deployments(c.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 3, len(deployments.Items)) - - // no orphan resources to remove - c.removeOrphanMonResources() - - // We expect mons to exist: a, g, h - // Check that their PVCs are not garbage collected after we create fake PVCs - badMon := "c" - goodMons := []string{"a", "g", "h"} - c.spec.Mon.VolumeClaimTemplate = &v1.PersistentVolumeClaim{} - for _, name := range append(goodMons, badMon) { - m := &monConfig{ResourceName: "rook-ceph-mon-" + name, DaemonName: name} - pvc, err := c.makeDeploymentPVC(m, true) - assert.NoError(t, err) - _, err = c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) - assert.NoError(t, err) - } - - pvcs, err := c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 4, len(pvcs.Items)) - - // pvc "c" should be removed and the others should remain - c.removeOrphanMonResources() - pvcs, err = c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 3, len(pvcs.Items)) - for _, pvc := range pvcs.Items { - found := false - for _, name := range goodMons { - if pvc.Name == "rook-ceph-mon-"+name { - found = true - break - } - } - assert.True(t, found, pvc.Name) - } -} - -func TestEvictMonOnSameNode(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 1) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("executing command: %s %+v", command, args) - return "{\"key\":\"mysecurekey\"}", nil - }, - } - context := &clusterd.Context{Clientset: clientset, ConfigDir: configDir, Executor: executor, RequestCancelOrchestration: abool.New()} - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New(context, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) - setCommonMonProperties(c, 1, cephv1.MonSpec{Count: 0}, "myversion") - c.maxMonID = 2 - c.waitForStart = false - waitForMonitorScheduling = func(c *Cluster, d *apps.Deployment) (SchedulingResult, error) { - node, _ := clientset.CoreV1().Nodes().Get(ctx, "node0", metav1.GetOptions{}) - return SchedulingResult{Node: node}, nil - } - - c.spec.Mon.Count = 3 - createTestMonPod(t, clientset, c, "a", "node1") - - // Nothing to evict with a single mon - err := c.evictMonIfMultipleOnSameNode() - assert.NoError(t, err) - - // Create a second mon on a different node - createTestMonPod(t, clientset, c, "b", "node2") - - // Nothing to evict with where mons are on different nodes - err = c.evictMonIfMultipleOnSameNode() - assert.NoError(t, err) - - // Create a third mon on the same node as mon a - createTestMonPod(t, clientset, c, "c", "node1") - assert.Equal(t, 2, c.maxMonID) - - // Should evict either mon a or mon c since they are on the same node and failover to mon d - err = c.evictMonIfMultipleOnSameNode() - assert.NoError(t, err) - _, err = clientset.AppsV1().Deployments(c.Namespace).Get(ctx, "rook-ceph-mon-d", metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, 3, c.maxMonID) -} - -func createTestMonPod(t *testing.T, clientset kubernetes.Interface, c *Cluster, name, node string) { - m := &monConfig{ResourceName: resourceName(name), DaemonName: 
name, DataPathMap: &config.DataPathMap{}} - d, err := c.makeDeployment(m, false) - assert.NoError(t, err) - monPod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "mon-pod-" + name, Namespace: c.Namespace, Labels: d.Labels}, - Spec: d.Spec.Template.Spec, - } - monPod.Spec.NodeName = node - monPod.Status.Phase = v1.PodRunning - _, err = clientset.CoreV1().Pods(c.Namespace).Create(context.TODO(), monPod, metav1.CreateOptions{}) - assert.NoError(t, err) -} - -func TestScaleMonDeployment(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 1) - context := &clusterd.Context{Clientset: clientset} - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New(context, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) - setCommonMonProperties(c, 1, cephv1.MonSpec{Count: 0, AllowMultiplePerNode: true}, "myversion") - - name := "a" - c.spec.Mon.Count = 3 - logger.Infof("initial mons: %v", c.ClusterInfo.Monitors[name]) - monConfig := &monConfig{ResourceName: resourceName(name), DaemonName: name, DataPathMap: &config.DataPathMap{}} - d, err := c.makeDeployment(monConfig, false) - require.NoError(t, err) - _, err = clientset.AppsV1().Deployments(c.Namespace).Create(ctx, d, metav1.CreateOptions{}) - require.NoError(t, err) - - verifyMonReplicas(ctx, t, c, name, 1) - err = c.updateMonDeploymentReplica(name, false) - assert.NoError(t, err) - verifyMonReplicas(ctx, t, c, name, 0) - - err = c.updateMonDeploymentReplica(name, true) - assert.NoError(t, err) - verifyMonReplicas(ctx, t, c, name, 1) -} - -func verifyMonReplicas(ctx context.Context, t *testing.T, c *Cluster, name string, expected int32) { - d, err := c.context.Clientset.AppsV1().Deployments(c.Namespace).Get(ctx, resourceName("a"), metav1.GetOptions{}) - require.NoError(t, err) - assert.Equal(t, expected, *d.Spec.Replicas) -} - -func TestCheckHealthNotFound(t *testing.T) { - ctx := context.TODO() - var deploymentsUpdated *[]*apps.Deployment - updateDeploymentAndWait, deploymentsUpdated = testopk8s.UpdateDeploymentAndWaitStub() - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("executing command: %s %+v", command, args) - if args[0] == "auth" && args[1] == "get-or-create-key" { - return "{\"key\":\"mysecurekey\"}", nil - } - return clienttest.MonInQuorumResponse(), nil - }, - } - clientset := test.New(t, 1) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - context := &clusterd.Context{ - Clientset: clientset, - ConfigDir: configDir, - Executor: executor, - RequestCancelOrchestration: abool.New(), - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New(context, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) - setCommonMonProperties(c, 2, cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, "myversion") - c.waitForStart = false - defer os.RemoveAll(c.context.ConfigDir) - - c.mapping.Schedule["a"] = &MonScheduleInfo{ - Name: "node0", - } - c.mapping.Schedule["b"] = &MonScheduleInfo{ - Name: "node0", - } - c.maxMonID = 4 - - err := c.saveMonConfig() - assert.NoError(t, err) - - // Check if the two mons are found in the configmap - cm, err := c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(ctx, EndpointConfigMapName, metav1.GetOptions{}) - assert.Nil(t, err) - if cm.Data[EndpointDataKey] != "a=1.2.3.1:6789,b=1.2.3.2:6789" { - assert.Equal(t, "b=1.2.3.2:6789,a=1.2.3.1:6789", cm.Data[EndpointDataKey]) - } - - // Because the mon a isn't in the MonInQuorumResponse() this will 
create a new mon - delete(c.mapping.Schedule, "b") - err = c.checkHealth() - assert.Nil(t, err) - // No updates in unit tests w/ workaround - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // recheck that the "not found" mon has been replaced with a new one - cm, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(ctx, EndpointConfigMapName, metav1.GetOptions{}) - assert.Nil(t, err) - if cm.Data[EndpointDataKey] != "a=1.2.3.1:6789,f=:6789" { - assert.Equal(t, "f=:6789,a=1.2.3.1:6789", cm.Data[EndpointDataKey]) - } -} - -func TestAddRemoveMons(t *testing.T) { - var deploymentsUpdated *[]*apps.Deployment - updateDeploymentAndWait, deploymentsUpdated = testopk8s.UpdateDeploymentAndWaitStub() - - monQuorumResponse := clienttest.MonInQuorumResponse() - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("executing command: %s %+v", command, args) - if args[0] == "auth" && args[1] == "get-or-create-key" { - return "{\"key\":\"mysecurekey\"}", nil - } - return monQuorumResponse, nil - }, - } - clientset := test.New(t, 1) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - context := &clusterd.Context{ - Clientset: clientset, - ConfigDir: configDir, - Executor: executor, - RequestCancelOrchestration: abool.New(), - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New(context, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) - setCommonMonProperties(c, 0, cephv1.MonSpec{Count: 5, AllowMultiplePerNode: true}, "myversion") - c.maxMonID = 0 // "a" is max mon id - c.waitForStart = false - defer os.RemoveAll(c.context.ConfigDir) - - // checking the health will increase the mons as desired all in one go - err := c.checkHealth() - assert.Nil(t, err) - assert.Equal(t, 5, len(c.ClusterInfo.Monitors), fmt.Sprintf("mons: %v", c.ClusterInfo.Monitors)) - assert.ElementsMatch(t, []string{ - // b is created first, no updates - "rook-ceph-mon-b", // b updated when c created - "rook-ceph-mon-b", "rook-ceph-mon-c", // b and c updated when d created - "rook-ceph-mon-b", "rook-ceph-mon-c", "rook-ceph-mon-d", // etc. 
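The health-check test above asserts on the endpoints ConfigMap value in either key order ("a=1.2.3.1:6789,b=1.2.3.2:6789" or the reverse) because the serialized form comes from iterating a Go map. The sketch below shows that flattened format; sorting is added only to make the example deterministic, and the function is a simplified stand-in rather than the removed FlattenMonEndpoints itself.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// flattenMonEndpoints joins mon name/endpoint pairs into the "a=1.2.3.1:6789,b=..."
// form stored under the "data" key of the rook-ceph-mon-endpoints ConfigMap.
func flattenMonEndpoints(mons map[string]string) string {
	parts := make([]string, 0, len(mons))
	for name, endpoint := range mons {
		parts = append(parts, fmt.Sprintf("%s=%s", name, endpoint))
	}
	sort.Strings(parts) // only for deterministic output in this example
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(flattenMonEndpoints(map[string]string{
		"a": "1.2.3.1:6789",
		"b": "1.2.3.2:6789",
	})) // a=1.2.3.1:6789,b=1.2.3.2:6789
}
```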
- "rook-ceph-mon-b", "rook-ceph-mon-c", "rook-ceph-mon-d", "rook-ceph-mon-e"}, - testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // reducing the mon count to 3 will reduce the mon count once each time we call checkHealth - monQuorumResponse = clienttest.MonInQuorumResponseFromMons(c.ClusterInfo.Monitors) - c.spec.Mon.Count = 3 - err = c.checkHealth() - assert.Nil(t, err) - assert.Equal(t, 4, len(c.ClusterInfo.Monitors)) - // No updates in unit tests w/ workaround - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // after the second call we will be down to the expected count of 3 - monQuorumResponse = clienttest.MonInQuorumResponseFromMons(c.ClusterInfo.Monitors) - err = c.checkHealth() - assert.Nil(t, err) - assert.Equal(t, 3, len(c.ClusterInfo.Monitors)) - // No updates in unit tests w/ workaround - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // now attempt to reduce the mons down to quorum size 1 - monQuorumResponse = clienttest.MonInQuorumResponseFromMons(c.ClusterInfo.Monitors) - c.spec.Mon.Count = 1 - err = c.checkHealth() - assert.Nil(t, err) - assert.Equal(t, 2, len(c.ClusterInfo.Monitors)) - // No updates in unit tests w/ workaround - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // cannot reduce from quorum size of 2 to 1 - monQuorumResponse = clienttest.MonInQuorumResponseFromMons(c.ClusterInfo.Monitors) - err = c.checkHealth() - assert.Nil(t, err) - assert.Equal(t, 2, len(c.ClusterInfo.Monitors)) - // No updates in unit tests w/ workaround - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) -} - -func TestAddOrRemoveExternalMonitor(t *testing.T) { - var changed bool - var err error - - // populate fake monmap - fakeResp := cephclient.MonStatusResponse{Quorum: []int{0}} - - fakeResp.MonMap.Mons = []cephclient.MonMapEntry{ - { - Name: "a", - }, - } - fakeResp.MonMap.Mons[0].PublicAddr = "172.17.0.4:3300" - - // populate fake ClusterInfo - c := &Cluster{ClusterInfo: &cephclient.ClusterInfo{}} - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - - // - // TEST 1 - // - // both clusterInfo and mon map are identical so nil is expected - changed, err = c.addOrRemoveExternalMonitor(fakeResp) - assert.NoError(t, err) - assert.False(t, changed) - assert.Equal(t, 1, len(c.ClusterInfo.Monitors)) - - // - // TEST 2 - // - // Now let's test the case where mon disappeared from the external cluster - // ClusterInfo still has them but they are gone from the monmap. 
- // Thus they should be removed from ClusterInfo - c.ClusterInfo = clienttest.CreateTestClusterInfo(3) - changed, err = c.addOrRemoveExternalMonitor(fakeResp) - assert.NoError(t, err) - assert.True(t, changed) - // ClusterInfo should shrink to 1 - assert.Equal(t, 1, len(c.ClusterInfo.Monitors)) - - // - // TEST 3 - // - // Now let's add a new mon in the external cluster - // ClusterInfo should be updated with this new monitor - fakeResp.MonMap.Mons = []cephclient.MonMapEntry{ - { - Name: "a", - }, - { - Name: "b", - }, - } - fakeResp.MonMap.Mons[1].PublicAddr = "172.17.0.5:3300" - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - changed, err = c.addOrRemoveExternalMonitor(fakeResp) - assert.NoError(t, err) - assert.True(t, changed) - // ClusterInfo should now have 2 monitors - assert.Equal(t, 2, len(c.ClusterInfo.Monitors)) -} - -func TestNewHealthChecker(t *testing.T) { - c := &Cluster{spec: cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{}}} - - type args struct { - monCluster *Cluster - } - tests := struct { - name string - args args - want *HealthChecker - }{ - "default-interval", args{c}, &HealthChecker{c, HealthCheckInterval}, - } - t.Run(tests.name, func(t *testing.T) { - if got := NewHealthChecker(tests.args.monCluster); !reflect.DeepEqual(got, tests.want) { - t.Errorf("NewHealthChecker() = %v, want %v", got, tests.want) - } - }) -} - -func TestUpdateMonTimeout(t *testing.T) { - t.Run("using default mon timeout", func(t *testing.T) { - m := &Cluster{} - updateMonTimeout(m) - assert.Equal(t, time.Minute*10, MonOutTimeout) - }) - t.Run("using env var mon timeout", func(t *testing.T) { - os.Setenv("ROOK_MON_OUT_TIMEOUT", "10s") - defer os.Unsetenv("ROOK_MON_OUT_TIMEOUT") - m := &Cluster{} - updateMonTimeout(m) - assert.Equal(t, time.Second*10, MonOutTimeout) - }) - t.Run("using spec mon timeout", func(t *testing.T) { - m := &Cluster{spec: cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{Monitor: cephv1.HealthCheckSpec{Timeout: "1m"}}}}} - updateMonTimeout(m) - assert.Equal(t, time.Minute, MonOutTimeout) - }) -} - -func TestUpdateMonInterval(t *testing.T) { - t.Run("using default mon interval", func(t *testing.T) { - m := &Cluster{} - h := &HealthChecker{m, HealthCheckInterval} - updateMonInterval(m, h) - assert.Equal(t, time.Second*45, HealthCheckInterval) - }) - t.Run("using env var mon timeout", func(t *testing.T) { - os.Setenv("ROOK_MON_HEALTHCHECK_INTERVAL", "10s") - defer os.Unsetenv("ROOK_MON_HEALTHCHECK_INTERVAL") - m := &Cluster{} - h := &HealthChecker{m, HealthCheckInterval} - updateMonInterval(m, h) - assert.Equal(t, time.Second*10, h.interval) - }) - t.Run("using spec mon timeout", func(t *testing.T) { - tm, err := time.ParseDuration("1m") - assert.NoError(t, err) - m := &Cluster{spec: cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{Monitor: cephv1.HealthCheckSpec{Interval: &metav1.Duration{Duration: tm}}}}}} - h := &HealthChecker{m, HealthCheckInterval} - updateMonInterval(m, h) - assert.Equal(t, time.Minute, h.interval) - }) -} diff --git a/pkg/operator/ceph/cluster/mon/mon.go b/pkg/operator/ceph/cluster/mon/mon.go deleted file mode 100644 index c6c6566b7..000000000 --- a/pkg/operator/ceph/cluster/mon/mon.go +++ /dev/null @@ -1,1430 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
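TestUpdateMonTimeout above checks three sources for the mon-out timeout: a 10-minute default, the ROOK_MON_OUT_TIMEOUT environment variable, and the CephCluster health-check spec. The sketch below resolves a timeout from those same sources; the precedence shown (spec, then environment, then default) is an assumption made for illustration, since each case is tested in isolation.

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// resolveMonOutTimeout is an illustrative resolver: spec value first, then the
// ROOK_MON_OUT_TIMEOUT environment variable, then the 10-minute default.
func resolveMonOutTimeout(specTimeout string) time.Duration {
	if specTimeout != "" {
		if d, err := time.ParseDuration(specTimeout); err == nil {
			return d
		}
	}
	if env := os.Getenv("ROOK_MON_OUT_TIMEOUT"); env != "" {
		if d, err := time.ParseDuration(env); err == nil {
			return d
		}
	}
	return 10 * time.Minute
}

func main() {
	os.Setenv("ROOK_MON_OUT_TIMEOUT", "10s")
	fmt.Println(resolveMonOutTimeout(""))   // 10s
	fmt.Println(resolveMonOutTimeout("1m")) // 1m0s
}
```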
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mon provides methods for creating clusters of Ceph mons in Kubernetes, for monitoring the -// cluster's status, for taking corrective actions if the status is non-ideal, and for reporting -// mon cluster failures. -package mon - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephutil "github.com/rook/rook/pkg/daemon/ceph/util" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/csi" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" -) - -const ( - // EndpointConfigMapName is the name of the configmap with mon endpoints - EndpointConfigMapName = "rook-ceph-mon-endpoints" - // EndpointDataKey is the name of the key inside the mon configmap to get the endpoints - EndpointDataKey = "data" - // MaxMonIDKey is the name of the max mon id used - MaxMonIDKey = "maxMonId" - // MappingKey is the name of the mapping for the mon->node and node->port - MappingKey = "mapping" - - // AppName is the name of the secret storing cluster mon.admin key, fsid and name - AppName = "rook-ceph-mon" - // OperatorCreds is the name of the secret - OperatorCreds = "rook-ceph-operator-creds" - monClusterAttr = "mon_cluster" - fsidSecretNameKey = "fsid" - monSecretNameKey = "mon-secret" - // AdminSecretName is the name of the admin secret - adminSecretNameKey = "admin-secret" - cephUsernameKey = "ceph-username" - cephUserSecretKey = "ceph-secret" - - // DefaultMonCount Default mon count for a cluster - DefaultMonCount = 3 - // MaxMonCount Maximum allowed mon count for a cluster - MaxMonCount = 9 - - // DefaultMsgr1Port is the default port Ceph mons use to communicate amongst themselves prior - // to Ceph Nautilus. - DefaultMsgr1Port int32 = 6789 - // DefaultMsgr2Port is the listening port of the messenger v2 protocol introduced in Ceph - // Nautilus. In Nautilus and a few Ceph releases after, Ceph can use both v1 and v2 protocols. - DefaultMsgr2Port int32 = 3300 - - // minimum amount of memory in MB to run the pod - cephMonPodMinimumMemory uint64 = 1024 - - // default storage request size for ceph monitor pvc - // https://docs.ceph.com/docs/master/start/hardware-recommendations/#monitors-and-managers-ceph-mon-and-ceph-mgr - cephMonDefaultStorageRequest = "10Gi" - - // canary pod scheduling uses retry loops when cleaning up previous canary - // pods and waiting for kubernetes scheduling to complete. 
- canaryRetries = 30 - canaryRetryDelaySeconds = 5 -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-mon") - - // hook for tests to override - waitForMonitorScheduling = realWaitForMonitorScheduling -) - -// Cluster represents the Rook and environment configuration settings needed to set up Ceph mons. -type Cluster struct { - ClusterInfo *cephclient.ClusterInfo - context *clusterd.Context - spec cephv1.ClusterSpec - Namespace string - Keyring string - rookVersion string - orchestrationMutex sync.Mutex - Port int32 - maxMonID int - waitForStart bool - monTimeoutList map[string]time.Time - mapping *Mapping - ownerInfo *k8sutil.OwnerInfo - csiConfigMutex *sync.Mutex - isUpgrade bool - arbiterMon string -} - -// monConfig for a single monitor -type monConfig struct { - // ResourceName is the name given to the mon's Kubernetes resources in metadata - ResourceName string - // DaemonName is the name given the mon daemon ("a", "b", "c,", etc.) - DaemonName string - // PublicIP is the IP of the mon's service that the mon will receive connections on - PublicIP string - // Port is the port on which the mon will listen for connections - Port int32 - // The zone used for a stretch cluster - Zone string - // DataPathMap is the mapping relationship between mon data stored on the host and mon data - // stored in containers. - DataPathMap *config.DataPathMap -} - -// Mapping is mon node and port mapping -type Mapping struct { - // This isn't really node info since it could also be for zones, but we leave it as "node" for backward compatibility. - Schedule map[string]*MonScheduleInfo `json:"node"` -} - -// MonScheduleInfo contains name and address of a node. -type MonScheduleInfo struct { - // Name of the node. **json names are capitalized for backwards compat** - Name string `json:"Name,omitempty"` - Hostname string `json:"Hostname,omitempty"` - Address string `json:"Address,omitempty"` - Zone string `json:"zone,omitempty"` -} - -type SchedulingResult struct { - Node *v1.Node - CanaryDeployment *apps.Deployment - CanaryPVC string -} - -// New creates an instance of a mon cluster -func New(context *clusterd.Context, namespace string, spec cephv1.ClusterSpec, ownerInfo *k8sutil.OwnerInfo, csiConfigMutex *sync.Mutex) *Cluster { - return &Cluster{ - context: context, - spec: spec, - Namespace: namespace, - maxMonID: -1, - waitForStart: true, - monTimeoutList: map[string]time.Time{}, - mapping: &Mapping{ - Schedule: map[string]*MonScheduleInfo{}, - }, - ownerInfo: ownerInfo, - csiConfigMutex: csiConfigMutex, - } -} - -// Start begins the process of running a cluster of Ceph mons. -func (c *Cluster) Start(clusterInfo *cephclient.ClusterInfo, rookVersion string, cephVersion cephver.CephVersion, spec cephv1.ClusterSpec) (*cephclient.ClusterInfo, error) { - - // Only one goroutine can orchestrate the mons at a time - c.acquireOrchestrationLock() - defer c.releaseOrchestrationLock() - - clusterInfo.OwnerInfo = c.ownerInfo - c.ClusterInfo = clusterInfo - c.rookVersion = rookVersion - c.spec = spec - - // fail if we were instructed to deploy more than one mon on the same machine with host networking - if c.spec.Network.IsHost() && c.spec.Mon.AllowMultiplePerNode && c.spec.Mon.Count > 1 { - return nil, errors.Errorf("refusing to deploy %d monitors on the same host with host networking and allowMultiplePerNode is %t. 
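The Mapping and MonScheduleInfo types above deliberately keep legacy JSON keys ("node" and capitalized field names) for backward compatibility with data already persisted in the endpoints ConfigMap. The snippet below is a trimmed copy of just those two types, showing the JSON shape that ends up under the "mapping" key; values are made up for the example.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Minimal copies of the removed Mapping/MonScheduleInfo types. Note the legacy
// "node" key and the capitalized field names kept for backward compatibility.
type MonScheduleInfo struct {
	Name     string `json:"Name,omitempty"`
	Hostname string `json:"Hostname,omitempty"`
	Address  string `json:"Address,omitempty"`
	Zone     string `json:"zone,omitempty"`
}

type Mapping struct {
	Schedule map[string]*MonScheduleInfo `json:"node"`
}

func main() {
	m := Mapping{Schedule: map[string]*MonScheduleInfo{
		"a": {Name: "node0", Hostname: "node0", Address: "1.2.3.1"},
	}}
	out, _ := json.Marshal(m)
	fmt.Println(string(out))
	// {"node":{"a":{"Name":"node0","Hostname":"node0","Address":"1.2.3.1"}}}
}
```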
only one monitor per node is allowed", c.spec.Mon.Count, c.spec.Mon.AllowMultiplePerNode) - } - - // Validate pod's memory if specified - err := controller.CheckPodMemory(cephv1.ResourcesKeyMon, cephv1.GetMonResources(c.spec.Resources), cephMonPodMinimumMemory) - if err != nil { - return nil, errors.Wrap(err, "error checking pod memory") - } - - logger.Infof("start running mons") - - logger.Debugf("establishing ceph cluster info") - if err := c.initClusterInfo(cephVersion); err != nil { - return nil, errors.Wrap(err, "failed to initialize ceph cluster info") - } - - logger.Infof("targeting the mon count %d", c.spec.Mon.Count) - - // create the mons for a new cluster or ensure mons are running in an existing cluster - return c.ClusterInfo, c.startMons(c.spec.Mon.Count) -} - -func (c *Cluster) startMons(targetCount int) error { - // init the mon config - existingCount, mons, err := c.initMonConfig(targetCount) - if err != nil { - return errors.Wrap(err, "failed to init mon config") - } - - // Assign the mons to nodes - if err := c.assignMons(mons); err != nil { - return errors.Wrap(err, "failed to assign pods to mons") - } - - // The centralized mon config database can only be used if there is at least one mon - // operational. If we are starting mons, and one is already up, then there is a cluster already - // created, and we can immediately set values in the config database. The goal is to set configs - // only once and do it as early as possible in the mon orchestration. - setConfigsNeedsRetry := false - if existingCount > 0 { - err := config.SetOrRemoveDefaultConfigs(c.context, c.ClusterInfo, c.spec) - if err != nil { - // If we fail here, it could be because the mons are not healthy, and this might be - // fixed by updating the mon deployments. Instead of returning error here, log a - // warning, and retry setting this later. - setConfigsNeedsRetry = true - logger.Warningf("failed to set Rook and/or user-defined Ceph config options before starting mons; will retry after starting mons. %v", err) - } - } - - if existingCount < len(mons) { - // Start the new mons one at a time - for i := existingCount; i < targetCount; i++ { - // Check whether we need to cancel the orchestration - if err := controller.CheckForCancelledOrchestration(c.context); err != nil { - return err - } - - if err := c.ensureMonsRunning(mons, i, targetCount, true); err != nil { - return err - } - - // If this is the first mon being created, we have to wait until it is created to set - // values in the config database. Do this only when the existing count is zero so that - // this is only done once when the cluster is created. - if existingCount == 0 { - err := config.SetOrRemoveDefaultConfigs(c.context, c.ClusterInfo, c.spec) - if err != nil { - return errors.Wrap(err, "failed to set Rook and/or user-defined Ceph config options after creating the first mon") - } - } else if setConfigsNeedsRetry && i == existingCount { - // Or if we need to retry, only do this when we are on the first iteration of the - // loop. This could be in the same if statement as above, but separate it to get a - // different error message. 
- err := config.SetOrRemoveDefaultConfigs(c.context, c.ClusterInfo, c.spec) - if err != nil { - return errors.Wrap(err, "failed to set Rook and/or user-defined Ceph config options after updating the existing mons") - } - } - } - } else { - // Ensure all the expected mon deployments exist, but don't require full quorum to continue - lastMonIndex := len(mons) - 1 - if err := c.ensureMonsRunning(mons, lastMonIndex, targetCount, false); err != nil { - return err - } - - if setConfigsNeedsRetry { - err := config.SetOrRemoveDefaultConfigs(c.context, c.ClusterInfo, c.spec) - if err != nil { - return errors.Wrap(err, "failed to set Rook and/or user-defined Ceph config options after forcefully updating the existing mons") - } - } - } - - if c.spec.IsStretchCluster() { - if err := c.configureStretchCluster(mons); err != nil { - return errors.Wrap(err, "failed to configure stretch mons") - } - } - - logger.Debugf("mon endpoints used are: %s", FlattenMonEndpoints(c.ClusterInfo.Monitors)) - - // reconcile mon PDB - if err := c.reconcileMonPDB(); err != nil { - return errors.Wrap(err, "failed to reconcile mon PDB") - } - - // Check if there are orphaned mon resources that should be cleaned up at the end of a reconcile. - // There may be orphaned resources if a mon failover was aborted. - c.removeOrphanMonResources() - - return nil -} - -func (c *Cluster) configureStretchCluster(mons []*monConfig) error { - // Enable the mon connectivity strategy - if err := cephclient.EnableStretchElectionStrategy(c.context, c.ClusterInfo); err != nil { - return errors.Wrap(err, "failed to enable stretch cluster") - } - - // Create the default crush rule for stretch clusters, that by default will also apply to all pools - if err := cephclient.CreateDefaultStretchCrushRule(c.context, c.ClusterInfo, &c.spec, c.stretchFailureDomainName()); err != nil { - return errors.Wrap(err, "failed to create default stretch rule") - } - - return nil -} - -func (c *Cluster) getArbiterZone() string { - for _, zone := range c.spec.Mon.StretchCluster.Zones { - if zone.Arbiter { - return zone.Name - } - } - return "" -} - -func (c *Cluster) isArbiterZone(zone string) bool { - if !c.spec.IsStretchCluster() { - return false - } - return c.getArbiterZone() == zone -} - -func (c *Cluster) ConfigureArbiter() error { - if c.arbiterMon == "" { - return errors.New("arbiter not specified for the stretch cluster") - } - - monDump, err := cephclient.GetMonDump(c.context, c.ClusterInfo) - if err != nil { - logger.Warningf("attempting to enable arbiter after failed to detect if already enabled. %v", err) - } else if monDump.StretchMode { - logger.Infof("stretch mode is already enabled") - return nil - } - - // Wait for the CRUSH map to have at least two zones - // The timeout is relatively short since the operator will requeue the reconcile - // and try again at a higher level if not yet found - failureDomain := c.stretchFailureDomainName() - logger.Infof("enabling stretch mode... 
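getArbiterZone above simply scans the stretch-cluster zones for the one flagged as the arbiter. A self-contained sketch of that lookup, using a trimmed stand-in for the zone spec type (field and zone names here are illustrative):

```go
package main

import "fmt"

// StretchZone is a trimmed-down stand-in for the zone entries in the CephCluster
// mon spec; only the fields needed for the arbiter lookup are kept.
type StretchZone struct {
	Name    string
	Arbiter bool
}

// arbiterZone returns the name of the zone flagged as the arbiter, or "" if
// none is flagged, mirroring the removed getArbiterZone helper in simplified form.
func arbiterZone(zones []StretchZone) string {
	for _, z := range zones {
		if z.Arbiter {
			return z.Name
		}
	}
	return ""
}

func main() {
	zones := []StretchZone{{Name: "us-east-1a"}, {Name: "us-east-1b"}, {Name: "arbiter", Arbiter: true}}
	fmt.Println(arbiterZone(zones)) // arbiter
}
```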
waiting for two failure domains of type %q to be found in the CRUSH map after OSD initialization", failureDomain) - pollInterval := 5 * time.Second - totalWaitTime := 2 * time.Minute - err = wait.Poll(pollInterval, totalWaitTime, func() (bool, error) { - return c.readyToConfigureArbiter(true) - }) - if err != nil { - return errors.Wrapf(err, "failed to find two failure domains %q in the CRUSH map", failureDomain) - } - - // Set the mon tiebreaker - if err := cephclient.SetMonStretchTiebreaker(c.context, c.ClusterInfo, c.arbiterMon, failureDomain); err != nil { - return errors.Wrap(err, "failed to set mon tiebreaker") - } - - return nil -} - -func (c *Cluster) readyToConfigureArbiter(checkOSDPods bool) (bool, error) { - failureDomain := c.stretchFailureDomainName() - - if checkOSDPods { - // Wait for the OSD pods to be running - // can't use osd.AppName due to a circular dependency - allRunning, err := k8sutil.PodsWithLabelAreAllRunning(c.context.Clientset, c.Namespace, fmt.Sprintf("%s=rook-ceph-osd", k8sutil.AppAttr)) - if err != nil { - return false, errors.Wrap(err, "failed to check whether all osds are running before enabling the arbiter") - } - if !allRunning { - logger.Infof("waiting for all OSD pods to be in running state") - return false, nil - } - } - - crushMap, err := cephclient.GetCrushMap(c.context, c.ClusterInfo) - if err != nil { - return false, errors.Wrap(err, "failed to get crush map") - } - - // Check if the crush rule already exists - zoneCount := 0 - zoneWeight := -1 - for _, bucket := range crushMap.Buckets { - if bucket.TypeName == failureDomain { - // skip zones specific to device classes - if strings.Index(bucket.Name, "~") > 0 { - logger.Debugf("skipping device class bucket %q", bucket.Name) - continue - } - logger.Infof("found %s %q in CRUSH map with weight %d", failureDomain, bucket.Name, bucket.Weight) - zoneCount++ - - // check that the weights of the failure domains are all the same - if zoneWeight == -1 { - // found the first matching bucket - zoneWeight = bucket.Weight - } else if zoneWeight != bucket.Weight { - logger.Infof("found failure domains that have different weights") - return false, nil - } - } - } - if zoneCount < 2 { - // keep waiting to see if more zones will be created - return false, nil - } - if zoneCount > 2 { - return false, fmt.Errorf("cannot configure stretch cluster with more than 2 failure domains, and found %d of type %q", zoneCount, failureDomain) - } - logger.Infof("found two expected failure domains %q for the stretch cluster", failureDomain) - return true, nil -} - -// ensureMonsRunning is called in two scenarios: -// 1. To create a new mon and wait for it to join quorum (requireAllInQuorum = true). This method will be called multiple times -// to add a mon until we have reached the desired number of mons. -// 2. To check that the majority of existing mons are in quorum. It is ok if not all mons are in quorum. (requireAllInQuorum = false) -// This is needed when the operator is restarted and all mons may not be up or in quorum. -func (c *Cluster) ensureMonsRunning(mons []*monConfig, i, targetCount int, requireAllInQuorum bool) error { - if requireAllInQuorum { - logger.Infof("creating mon %s", mons[i].DaemonName) - } else { - logger.Info("checking for basic quorum with existing mons") - } - - // Calculate how many mons we expected to exist after this method is completed. - // If we are adding a new mon, we expect one more than currently exist. 
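readyToConfigureArbiter above waits for exactly two equally weighted failure-domain buckets in the CRUSH map, skipping the "~" shadow buckets that CRUSH creates per device class. Below is a simplified pure-Go sketch of that check against a minimal stand-in bucket type; it is not the removed function itself and drops the OSD-pod precondition.

```go
package main

import (
	"fmt"
	"strings"
)

// crushBucket is a minimal stand-in for the CRUSH map buckets inspected above.
type crushBucket struct {
	Name     string
	TypeName string
	Weight   int
}

// stretchZonesReady reports whether exactly two equally weighted buckets of the
// given failure domain exist, skipping per-device-class ("~") shadow buckets.
func stretchZonesReady(buckets []crushBucket, failureDomain string) (bool, error) {
	count, weight := 0, -1
	for _, b := range buckets {
		if b.TypeName != failureDomain || strings.Contains(b.Name, "~") {
			continue
		}
		count++
		if weight == -1 {
			weight = b.Weight
		} else if weight != b.Weight {
			return false, nil // zones not yet balanced, keep waiting
		}
	}
	if count > 2 {
		return false, fmt.Errorf("found %d failure domains of type %q, expected 2", count, failureDomain)
	}
	return count == 2, nil
}

func main() {
	buckets := []crushBucket{
		{Name: "zone-a", TypeName: "zone", Weight: 100},
		{Name: "zone-b", TypeName: "zone", Weight: 100},
		{Name: "zone-a~ssd", TypeName: "zone", Weight: 100},
	}
	fmt.Println(stretchZonesReady(buckets, "zone")) // true <nil>
}
```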
- // If we haven't created all the desired mons already, we will be adding a new one with this iteration - expectedMonCount := len(c.ClusterInfo.Monitors) - if expectedMonCount < targetCount { - expectedMonCount++ - } - - // Init the mon IPs - if err := c.initMonIPs(mons[0:expectedMonCount]); err != nil { - return errors.Wrap(err, "failed to init mon services") - } - - // save the mon config after we have "initiated the IPs" - if err := c.saveMonConfig(); err != nil { - return errors.Wrap(err, "failed to save mons") - } - - // Start the deployment - if err := c.startDeployments(mons[0:expectedMonCount], requireAllInQuorum); err != nil { - return errors.Wrap(err, "failed to start mon pods") - } - - return nil -} - -// initClusterInfo retrieves the ceph cluster info if it already exists. -// If a new cluster, create new keys. -func (c *Cluster) initClusterInfo(cephVersion cephver.CephVersion) error { - var err error - - // get the cluster info from secret - c.ClusterInfo, c.maxMonID, c.mapping, err = CreateOrLoadClusterInfo(c.context, c.Namespace, c.ownerInfo) - if err != nil { - return errors.Wrap(err, "failed to get cluster info") - } - - c.ClusterInfo.CephVersion = cephVersion - c.ClusterInfo.OwnerInfo = c.ownerInfo - - // save cluster monitor config - if err = c.saveMonConfig(); err != nil { - return errors.Wrap(err, "failed to save mons") - } - - k := keyring.GetSecretStore(c.context, c.ClusterInfo, c.ownerInfo) - // store the keyring which all mons share - if err := k.CreateOrUpdate(keyringStoreName, c.genMonSharedKeyring()); err != nil { - return errors.Wrap(err, "failed to save mon keyring secret") - } - // also store the admin keyring for other daemons that might need it during init - if err := k.Admin().CreateOrUpdate(c.ClusterInfo); err != nil { - return errors.Wrap(err, "failed to save admin keyring secret") - } - - return nil -} - -func (c *Cluster) initMonConfig(size int) (int, []*monConfig, error) { - - // initialize the mon pod info for mons that have been previously created - mons := c.clusterInfoToMonConfig("") - - // initialize mon info if we don't have enough mons (at first startup) - existingCount := len(c.ClusterInfo.Monitors) - for i := len(c.ClusterInfo.Monitors); i < size; i++ { - c.maxMonID++ - zone, err := c.findAvailableZoneIfStretched(mons) - if err != nil { - return existingCount, mons, errors.Wrap(err, "stretch zone not available") - } - mons = append(mons, c.newMonConfig(c.maxMonID, zone)) - } - - return existingCount, mons, nil -} - -func (c *Cluster) clusterInfoToMonConfig(excludedMon string) []*monConfig { - mons := []*monConfig{} - for _, monitor := range c.ClusterInfo.Monitors { - if monitor.Name == excludedMon { - // Skip a mon if it is being failed over - continue - } - var zone string - schedule := c.mapping.Schedule[monitor.Name] - if schedule != nil { - zone = schedule.Zone - } - mons = append(mons, &monConfig{ - ResourceName: resourceName(monitor.Name), - DaemonName: monitor.Name, - Port: cephutil.GetPortFromEndpoint(monitor.Endpoint), - PublicIP: cephutil.GetIPFromEndpoint(monitor.Endpoint), - Zone: zone, - DataPathMap: config.NewStatefulDaemonDataPathMap(c.spec.DataDirHostPath, dataDirRelativeHostPath(monitor.Name), config.MonType, monitor.Name, c.Namespace), - }) - } - return mons -} - -func (c *Cluster) newMonConfig(monID int, zone string) *monConfig { - daemonName := k8sutil.IndexToName(monID) - - return &monConfig{ - ResourceName: resourceName(daemonName), - DaemonName: daemonName, - Port: DefaultMsgr1Port, - Zone: zone, - DataPathMap: 
config.NewStatefulDaemonDataPathMap( - c.spec.DataDirHostPath, dataDirRelativeHostPath(daemonName), config.MonType, daemonName, c.Namespace), - } -} - -func (c *Cluster) findAvailableZoneIfStretched(mons []*monConfig) (string, error) { - if !c.spec.IsStretchCluster() { - return "", nil - } - - // Build the count of current mons per zone - zoneCount := map[string]int{} - for _, m := range mons { - if m.Zone == "" { - return "", errors.Errorf("zone not found on mon %q", m.DaemonName) - } - zoneCount[m.Zone]++ - } - - // Find a zone in the stretch cluster that still needs an assignment - for _, zone := range c.spec.Mon.StretchCluster.Zones { - count, ok := zoneCount[zone.Name] - if !ok { - // The zone isn't currently assigned to any mon, so return it - return zone.Name, nil - } - if c.spec.Mon.Count == 5 && count == 1 && !zone.Arbiter { - // The zone only has 1 mon assigned, but needs 2 mons since it is not the arbiter - return zone.Name, nil - } - } - return "", errors.New("A zone is not available to assign a new mon") -} - -// resourceName ensures the mon name has the rook-ceph-mon prefix -func resourceName(name string) string { - if strings.HasPrefix(name, AppName) { - return name - } - return fmt.Sprintf("%s-%s", AppName, name) -} - -// scheduleMonitor selects a node for a monitor deployment. -// see startMon() and design/ceph/ceph-mon-pv.md for additional details. -func scheduleMonitor(c *Cluster, mon *monConfig) (*apps.Deployment, error) { - ctx := context.TODO() - // build the canary deployment. - d, err := c.makeDeployment(mon, true) - if err != nil { - return nil, err - } - d.Name += "-canary" - d.Spec.Template.ObjectMeta.Name += "-canary" - - // the canary and real monitor deployments will mount the same storage. to - // avoid issues with the real deployment, the canary should be careful not - // to modify the storage by instead running an innocuous command. - d.Spec.Template.Spec.InitContainers = []v1.Container{} - d.Spec.Template.Spec.Containers[0].Image = c.rookVersion - d.Spec.Template.Spec.Containers[0].Command = []string{"/tini"} - d.Spec.Template.Spec.Containers[0].Args = []string{"--", "sleep", "3600"} - // remove the liveness probe on the canary pod - d.Spec.Template.Spec.Containers[0].LivenessProbe = nil - - // setup affinity settings for pod scheduling - p := c.getMonPlacement(mon.Zone) - p.ApplyToPodSpec(&d.Spec.Template.Spec) - k8sutil.SetNodeAntiAffinityForPod(&d.Spec.Template.Spec, requiredDuringScheduling(&c.spec), v1.LabelHostname, - map[string]string{k8sutil.AppAttr: AppName}, nil) - - // setup storage on the canary since scheduling will be affected when - // monitors are configured to use persistent volumes. the pvcName is set to - // the non-empty name of the PVC only when the PVC is created as a result of - // this call to the scheduler. - if c.monVolumeClaimTemplate(mon) == nil { - d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, - controller.DaemonVolumesDataHostPath(mon.DataPathMap)...) - } else { - // the pvc that is created here won't be deleted: it will be reattached - // to the real monitor deployment. 
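findAvailableZoneIfStretched above assigns a new mon to a zone with no mon yet, or, for a five-mon stretch cluster, to a non-arbiter zone that still has only one. A condensed sketch of that selection, with illustrative stand-in types rather than the CRD structs:

```go
package main

import (
	"errors"
	"fmt"
)

// zoneSpec is a stand-in for the stretch-cluster zone entries in the mon spec.
type zoneSpec struct {
	Name    string
	Arbiter bool
}

// pickStretchZone is a simplified version of findAvailableZoneIfStretched: pick a
// zone with no mon yet, or (five-mon cluster) a non-arbiter zone with only one.
func pickStretchZone(zones []zoneSpec, existing map[string]int, monCount int) (string, error) {
	for _, z := range zones {
		n, ok := existing[z.Name]
		if !ok || n == 0 {
			return z.Name, nil
		}
		if monCount == 5 && n == 1 && !z.Arbiter {
			return z.Name, nil
		}
	}
	return "", errors.New("no zone available for a new mon")
}

func main() {
	zones := []zoneSpec{{Name: "a"}, {Name: "b"}, {Name: "arbiter", Arbiter: true}}
	fmt.Println(pickStretchZone(zones, map[string]int{"a": 1, "b": 1, "arbiter": 1}, 5)) // a <nil>
}
```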
- pvc, err := c.makeDeploymentPVC(mon, true) - if err != nil { - return nil, errors.Wrapf(err, "sched-mon: failed to make monitor %s pvc", d.Name) - } - - _, err = c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) - if err == nil { - logger.Infof("sched-mon: created canary monitor %s pvc %s", d.Name, pvc.Name) - } else { - if kerrors.IsAlreadyExists(err) { - logger.Debugf("sched-mon: creating mon %s pvc %s: already exists.", d.Name, pvc.Name) - } else { - return nil, errors.Wrapf(err, "sched-mon: error creating mon %s pvc %s", d.Name, pvc.Name) - } - } - - d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, - controller.DaemonVolumesDataPVC(mon.ResourceName)) - controller.AddVolumeMountSubPath(&d.Spec.Template.Spec, "ceph-daemon-data") - } - - // spin up the canary deployment. if it exists, delete it first, since if it - // already exists it may have been scheduled with a different crd config. - createdDeployment := false - for i := 0; i < canaryRetries; i++ { - _, err := c.context.Clientset.AppsV1().Deployments(c.Namespace).Create(ctx, d, metav1.CreateOptions{}) - if err == nil { - createdDeployment = true - logger.Infof("sched-mon: created canary deployment %s", d.Name) - break - } else if kerrors.IsAlreadyExists(err) { - if err := k8sutil.DeleteDeployment(c.context.Clientset, c.Namespace, d.Name); err != nil { - return nil, errors.Wrapf(err, "sched-mon: error deleting canary deployment %s", d.Name) - } - logger.Infof("sched-mon: deleted existing canary deployment %s", d.Name) - time.Sleep(time.Second * canaryRetryDelaySeconds) - } else { - return nil, errors.Wrapf(err, "sched-mon: error creating canary monitor deployment %s", d.Name) - } - } - - // failed after retrying - if !createdDeployment { - return nil, errors.Errorf("sched-mon: failed to create canary deployment %s", d.Name) - } - - // caller should arrange for the deployment to be removed - return d, nil -} - -// GetMonPlacement returns the placement for the MON service -func (c *Cluster) getMonPlacement(zone string) cephv1.Placement { - // If the mon is the arbiter in a stretch cluster and its placement is specified, return it - // without merging with the "all" placement so it can be handled separately from all other daemons - if c.isArbiterZone(zone) { - p := cephv1.GetArbiterPlacement(c.spec.Placement) - noPlacement := cephv1.Placement{} - if !reflect.DeepEqual(p, noPlacement) { - // If the arbiter placement was specified, go ahead and use it. 
- return p - } - } - // If not the arbiter, or the arbiter placement is not specified, fall back to the same placement used for other mons - return cephv1.GetMonPlacement(c.spec.Placement) -} - -func realWaitForMonitorScheduling(c *Cluster, d *apps.Deployment) (SchedulingResult, error) { - ctx := context.TODO() - // target node decision, and deployment/pvc to cleanup - result := SchedulingResult{} - - // wait for the scheduler to make a placement decision - for i := 0; i < canaryRetries; i++ { - // Check whether we need to cancel the orchestration - if err := controller.CheckForCancelledOrchestration(c.context); err != nil { - return result, err - } - - if i > 0 { - time.Sleep(time.Second * canaryRetryDelaySeconds) - } - - listOptions := metav1.ListOptions{ - LabelSelector: labels.Set(d.Spec.Selector.MatchLabels).String(), - } - - pods, err := c.context.Clientset.CoreV1().Pods(c.Namespace).List(ctx, listOptions) - if err != nil { - return result, errors.Wrapf(err, "sched-mon: error listing canary pods %s", d.Name) - } - - if len(pods.Items) == 0 { - logger.Infof("sched-mon: waiting for canary pod creation %s", d.Name) - continue - } - - pod := pods.Items[0] - if pod.Spec.NodeName == "" { - logger.Debugf("sched-mon: monitor %s canary pod %s not yet scheduled", d.Name, pod.Name) - continue - } - - node, err := c.context.Clientset.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{}) - if err != nil { - return result, errors.Wrapf(err, "sched-mon: error getting node %s", pod.Spec.NodeName) - } - - result.Node = node - logger.Infof("sched-mon: canary monitor deployment %s scheduled to %s", d.Name, node.Name) - return result, nil - } - - return result, errors.New("sched-mon: canary pod scheduling failed retries") -} - -func (c *Cluster) initMonIPs(mons []*monConfig) error { - for _, m := range mons { - if c.spec.Network.IsHost() { - logger.Infof("setting mon endpoints for hostnetwork mode") - node, ok := c.mapping.Schedule[m.DaemonName] - if !ok || node == nil { - return errors.Errorf("node for mon %q doesn't exist in assignment map", m.DaemonName) - } - m.PublicIP = node.Address - } else { - serviceIP, err := c.createService(m) - if err != nil { - return errors.Wrap(err, "failed to create mon service") - } - m.PublicIP = serviceIP - } - c.ClusterInfo.Monitors[m.DaemonName] = cephclient.NewMonInfo(m.DaemonName, m.PublicIP, m.Port) - } - - return nil -} - -// Delete mon canary deployments (and associated PVCs) using deployment labels -// to select this kind of temporary deployments -func (c *Cluster) removeCanaryDeployments() { - ctx := context.TODO() - canaryDeployments, err := k8sutil.GetDeployments(c.context.Clientset, c.Namespace, "app=rook-ceph-mon,mon_canary=true") - if err != nil { - logger.Warningf("failed to get the list of monitor canary deployments. %v", err) - return - } - - // Delete the canary mons, but don't wait for them to exit - for _, canary := range canaryDeployments.Items { - logger.Infof("cleaning up canary monitor deployment %q", canary.Name) - var gracePeriod int64 - propagation := metav1.DeletePropagationForeground - options := &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod, PropagationPolicy: &propagation} - if err := c.context.Clientset.AppsV1().Deployments(c.Namespace).Delete(ctx, canary.Name, *options); err != nil { - logger.Warningf("failed to delete canary monitor deployment %q. 
%v", canary.Name, err) - } - } -} - -func (c *Cluster) assignMons(mons []*monConfig) error { - // when monitors are scheduling below by invoking scheduleMonitor() a canary - // deployment and optional canary PVC are created. In order for the - // anti-affinity rules to be effective, we leave the canary pods in place - // until all of the canaries have been scheduled. Only after the - // monitor/node assignment process is complete are the canary deployments - // and pvcs removed here. - defer c.removeCanaryDeployments() - - var monSchedulingWait sync.WaitGroup - var resultLock sync.Mutex - failedMonSchedule := false - - // ensure that all monitors have either (1) a node assignment that will be - // enforced using a node selector, or (2) configuration permits k8s to handle - // scheduling for the monitor. - for _, mon := range mons { - // Check whether we need to cancel the orchestration - if err := controller.CheckForCancelledOrchestration(c.context); err != nil { - return err - } - - // scheduling for this monitor has already been completed - if _, ok := c.mapping.Schedule[mon.DaemonName]; ok { - logger.Debugf("assignmon: mon %s already scheduled", mon.DaemonName) - continue - } - - // determine a placement for the monitor. note that this scheduling is - // performed even when a node selector is not required. this may be - // non-optimal, but it is convenient to catch some failures early, - // before a decision is stored in the node mapping. - deployment, err := scheduleMonitor(c, mon) - if err != nil { - return errors.Wrap(err, "assignmon: error scheduling monitor") - } - - // start waiting for the deployment - monSchedulingWait.Add(1) - - go func(deployment *apps.Deployment, mon *monConfig) { - // signal that the mon is done scheduling - defer monSchedulingWait.Done() - - result, err := waitForMonitorScheduling(c, deployment) - if err != nil { - logger.Errorf("failed to schedule mon %q. %v", mon.DaemonName, err) - failedMonSchedule = true - return - } - - nodeChoice := result.Node - if nodeChoice == nil { - logger.Errorf("assignmon: could not schedule monitor %q", mon.DaemonName) - failedMonSchedule = true - return - } - - // store nil in the node mapping to indicate that an explicit node - // placement is not being made. otherwise, the node choice will map - // directly to a node selector on the monitor pod. - var schedule *MonScheduleInfo - if c.spec.Network.IsHost() || c.monVolumeClaimTemplate(mon) == nil { - logger.Infof("assignmon: mon %s assigned to node %s", mon.DaemonName, nodeChoice.Name) - schedule, err = getNodeInfoFromNode(*nodeChoice) - if err != nil { - logger.Errorf("assignmon: couldn't get node info for node %q. 
%v", nodeChoice.Name, err) - failedMonSchedule = true - return - } - } else { - logger.Infof("assignmon: mon %q placement using native scheduler", mon.DaemonName) - } - - if c.spec.IsStretchCluster() { - if schedule == nil { - schedule = &MonScheduleInfo{} - } - logger.Infof("mon %q is assigned to zone %q", mon.DaemonName, mon.Zone) - schedule.Zone = mon.Zone - } - - // protect against multiple goroutines updating the status at the same time - resultLock.Lock() - c.mapping.Schedule[mon.DaemonName] = schedule - resultLock.Unlock() - }(deployment, mon) - } - - monSchedulingWait.Wait() - if failedMonSchedule { - return errors.New("failed to schedule mons") - } - - logger.Debug("assignmons: mons have been scheduled") - return nil -} - -func (c *Cluster) monVolumeClaimTemplate(mon *monConfig) *v1.PersistentVolumeClaim { - if !c.spec.IsStretchCluster() { - return c.spec.Mon.VolumeClaimTemplate - } - - // If a stretch cluster, a zone can override the template from the default. - for _, zone := range c.spec.Mon.StretchCluster.Zones { - if zone.Name == mon.Zone { - if zone.VolumeClaimTemplate != nil { - // Found an override for the volume claim template in the zone - return zone.VolumeClaimTemplate - } - break - } - } - // Return the default template since one wasn't found in the zone - return c.spec.Mon.VolumeClaimTemplate -} - -func (c *Cluster) startDeployments(mons []*monConfig, requireAllInQuorum bool) error { - ctx := context.TODO() - if len(mons) == 0 { - return errors.New("cannot start 0 mons") - } - - // If all the mon deployments don't exist, allow the mon deployments to all be started without checking for quorum. - // This will be the case where: - // 1) New clusters where we are starting one deployment at a time. We only need to check for quorum once when we add a new mon. - // 2) Clusters being restored where no mon deployments are running. We need to start all the deployments before checking quorum. - onlyCheckQuorumOnce := false - deployments, err := c.context.Clientset.AppsV1().Deployments(c.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("app=%s", AppName)}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Infof("0 of %d expected mon deployments exist. creating new deployment(s).", len(mons)) - onlyCheckQuorumOnce = true - } else { - logger.Warningf("failed to list mon deployments. attempting to continue. %v", err) - } - } - - readyReplicas := 0 - // Ensuring the mon deployments should be ready - for _, deploy := range deployments.Items { - if deploy.Status.AvailableReplicas > 0 { - readyReplicas++ - } - } - if len(deployments.Items) < len(mons) { - logger.Infof("%d of %d expected mon deployments exist. creating new deployment(s).", len(deployments.Items), len(mons)) - onlyCheckQuorumOnce = true - } else if readyReplicas == 0 { - logger.Infof("%d of %d expected mons are ready. creating or updating deployments without checking quorum in attempt to achieve a healthy mon cluster", readyReplicas, len(mons)) - onlyCheckQuorumOnce = true - } - - // Ensure each of the mons have been created. If already created, it will be a no-op. - for i := 0; i < len(mons); i++ { - schedule := c.mapping.Schedule[mons[i].DaemonName] - err := c.startMon(mons[i], schedule) - if err != nil { - if c.isUpgrade { - // if we're upgrading, we don't want to risk the health of the cluster by continuing to upgrade - // and potentially cause more mons to fail. Therefore, we abort if the mon failed to start after upgrade. 
- return errors.Wrapf(err, "failed to upgrade mon %q.", mons[i].DaemonName) - } - // We will attempt to start all mons, then check for quorum as needed after this. During an operator restart - // we need to do everything possible to verify the basic health of a cluster, complete the first orchestration, - // and start watching for all the CRs. If mons still have quorum we can continue with the orchestration even - // if they aren't all up. - logger.Errorf("attempting to continue after failing to start mon %q. %v", mons[i].DaemonName, err) - } - - // For the initial deployment (first creation) it's expected to not have all the monitors in quorum - // However, in an event of an update, it's crucial to proceed monitors by monitors - // At the end of the method we perform one last check where all the monitors must be in quorum - if !onlyCheckQuorumOnce || (onlyCheckQuorumOnce && i == len(mons)-1) { - requireAllInQuorum := false - err = c.waitForMonsToJoin(mons, requireAllInQuorum) - if err != nil { - return errors.Wrapf(err, "failed to check mon quorum %s", mons[i].DaemonName) - } - } - } - - logger.Infof("mons created: %d", len(mons)) - // Final verification that **all** mons are in quorum - // Do not proceed if one monitor is still syncing - // Only do this when monitors versions are different so we don't block the orchestration if a mon is down. - versions, err := cephclient.GetAllCephDaemonVersions(c.context, c.ClusterInfo) - if err != nil { - logger.Warningf("failed to get ceph daemons versions; this likely means there is no cluster yet. %v", err) - } else { - if len(versions.Mon) != 1 { - requireAllInQuorum = true - } - } - return c.waitForMonsToJoin(mons, requireAllInQuorum) -} - -func (c *Cluster) waitForMonsToJoin(mons []*monConfig, requireAllInQuorum bool) error { - if !c.waitForStart { - return nil - } - - starting := []string{} - for _, m := range mons { - starting = append(starting, m.DaemonName) - } - - // wait for the monitors to join quorum - sleepTime := 5 - err := waitForQuorumWithMons(c.context, c.ClusterInfo, starting, sleepTime, requireAllInQuorum) - if err != nil { - return errors.Wrap(err, "failed to wait for mon quorum") - } - - return nil -} - -func (c *Cluster) saveMonConfig() error { - if err := c.persistExpectedMonDaemons(); err != nil { - return errors.Wrap(err, "failed to persist expected mons") - } - - // Every time the mon config is updated, must also update the global config so that all daemons - // have the most updated version if they restart. 
- if err := config.GetStore(c.context, c.Namespace, c.ownerInfo).CreateOrUpdate(c.ClusterInfo); err != nil { - return errors.Wrap(err, "failed to update the global config") - } - - // write the latest config to the config dir - if err := WriteConnectionConfig(c.context, c.ClusterInfo); err != nil { - return errors.Wrap(err, "failed to write connection config for new mons") - } - - if err := csi.SaveClusterConfig(c.context.Clientset, c.Namespace, c.ClusterInfo, c.csiConfigMutex); err != nil { - return errors.Wrap(err, "failed to update csi cluster config") - } - - return nil -} - -func (c *Cluster) persistExpectedMonDaemons() error { - ctx := context.TODO() - configMap := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: EndpointConfigMapName, - Namespace: c.Namespace, - }, - } - err := c.ownerInfo.SetControllerReference(configMap) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference mon configmap %q", configMap.Name) - } - monMapping, err := json.Marshal(c.mapping) - if err != nil { - return errors.Wrap(err, "failed to marshal mon mapping") - } - - csiConfigValue, err := csi.FormatCsiClusterConfig( - c.Namespace, c.ClusterInfo.Monitors) - if err != nil { - return errors.Wrap(err, "failed to format csi config") - } - - maxMonID, err := c.getStoredMaxMonID() - if err != nil { - return errors.Wrap(err, "failed to save maxMonID") - } - - configMap.Data = map[string]string{ - EndpointDataKey: FlattenMonEndpoints(c.ClusterInfo.Monitors), - // persist the maxMonID that was previously stored in the configmap. We are likely saving info - // about scheduling of the mons, but we only want to update the maxMonID once a new mon has - // actually been started. If the operator is restarted or the reconcile is otherwise restarted, - // we want to calculate the mon scheduling next time based on the committed maxMonID, rather - // than only a mon scheduling, which may not have completed. - MaxMonIDKey: maxMonID, - MappingKey: string(monMapping), - csi.ConfigKey: csiConfigValue, - } - - if _, err := c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create mon endpoint config map") - } - - logger.Debugf("updating config map %s that already exists", configMap.Name) - if _, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Update(ctx, configMap, metav1.UpdateOptions{}); err != nil { - return errors.Wrap(err, "failed to update mon endpoint config map") - } - } - logger.Infof("saved mon endpoints to config map %+v", configMap.Data) - return nil -} - -func (c *Cluster) getStoredMaxMonID() (string, error) { - configmap, err := c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(context.TODO(), EndpointConfigMapName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return "", errors.Wrap(err, "could not load maxMonId") - } - if err == nil { - if val, ok := configmap.Data[MaxMonIDKey]; ok { - return val, nil - } - } - - // if the configmap cannot be loaded, assume a new cluster. If the mons have previously - // been created, the maxMonID will anyway analyze them to ensure the index is correct - // even if this error occurs. - logger.Infof("existing maxMonID not found or failed to load. 
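persistExpectedMonDaemons above writes four entries into the endpoints ConfigMap: the flattened endpoints under "data", the committed max mon ID under "maxMonId", the JSON mon mapping under "mapping", and the CSI cluster config. The sketch below only assembles that data map to make the layout concrete; the CSI entry is omitted because its key constant lives in the csi package, and the values are examples.

```go
package main

import "fmt"

// buildEndpointConfigMapData shows the shape of the data persisted into the
// rook-ceph-mon-endpoints ConfigMap (CSI entry omitted in this sketch).
func buildEndpointConfigMapData(endpoints, maxMonID, mappingJSON string) map[string]string {
	return map[string]string{
		"data":     endpoints,   // EndpointDataKey
		"maxMonId": maxMonID,    // MaxMonIDKey
		"mapping":  mappingJSON, // MappingKey
	}
}

func main() {
	fmt.Println(buildEndpointConfigMapData(
		"a=1.2.3.1:6789,b=1.2.3.2:6789",
		"1",
		`{"node":{"a":{"Name":"node0"}}}`,
	))
}
```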
%v", err) - return "-1", nil -} - -func (c *Cluster) commitMaxMonID(monName string) error { - committedMonID, err := k8sutil.NameToIndex(monName) - if err != nil { - return errors.Wrapf(err, "invalid mon name %q", monName) - } - - configmap, err := c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(context.TODO(), EndpointConfigMapName, metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "failed to find existing mon endpoint config map") - } - - // set the new max key if greater - existingMax, err := strconv.Atoi(configmap.Data[MaxMonIDKey]) - if err != nil { - return errors.Wrap(err, "failed to read existing maxMonId") - } - - if existingMax >= committedMonID { - logger.Infof("no need to commit maxMonID %d since it is not greater than existing maxMonID %d", committedMonID, existingMax) - return nil - } - - logger.Infof("updating maxMonID from %d to %d after committing mon %q", existingMax, committedMonID, monName) - configmap.Data[MaxMonIDKey] = strconv.Itoa(committedMonID) - - if _, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Update(context.TODO(), configmap, metav1.UpdateOptions{}); err != nil { - return errors.Wrap(err, "failed to update mon endpoint config map for the maxMonID") - } - return nil -} - -var updateDeploymentAndWait = UpdateCephDeploymentAndWait - -func (c *Cluster) updateMon(m *monConfig, d *apps.Deployment) error { - // Expand mon PVC if storage request for mon has increased in cephcluster crd - if c.monVolumeClaimTemplate(m) != nil { - desiredPvc, err := c.makeDeploymentPVC(m, false) - if err != nil { - return errors.Wrapf(err, "failed to make mon %q pvc", d.Name) - } - - existingPvc, err := c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).Get(context.TODO(), m.ResourceName, metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to fetch pvc for mon %q", m.ResourceName) - } - k8sutil.ExpandPVCIfRequired(c.context.Client, desiredPvc, existingPvc) - } - - logger.Infof("deployment for mon %s already exists. updating if needed", - d.Name) - - err := updateDeploymentAndWait(c.context, c.ClusterInfo, d, config.MonType, m.DaemonName, c.spec.SkipUpgradeChecks, false) - if err != nil { - return errors.Wrapf(err, "failed to update mon deployment %s", m.ResourceName) - } - - return nil -} - -// startMon creates or updates a monitor deployment. -// -// The node parameter specifies the node to be used as a node selector on the -// monitor pod. It is the result of scheduling a canary pod: see -// scheduleMonitor() for more details on scheduling. -// -// The node parameter is optional. When the parameter is nil it indicates that -// the pod should not use a node selector, and should instead rely on k8s to -// perform scheduling. -// -// The following outlines the different scenarios that exist and how deployments -// should be configured w.r.t. scheduling and the use of a node selector. -// -// 1) if HostNetworking -> always use node selector. we do not want to change -// the IP address of a monitor as it is wrapped up in the monitor's identity. -// with host networking we use node selector to ensure a stable IP for each -// monitor. see scheduleMonitor() comment for more details. -// -// Note: an important assumption is that HostNetworking setting does not -// change once a cluster is created. 
-// -// 2) if *not* HostNetworking -> stable IP from service; may avoid node selector -// a) when creating a new deployment -// - if HostPath -> use node selector for storage/node affinity -// - if PVC -> node selector is not required -// -// b) when updating a deployment -// - if HostPath -> leave node selector as is -// - if PVC -> remove node selector, if present -// -func (c *Cluster) startMon(m *monConfig, schedule *MonScheduleInfo) error { - ctx := context.TODO() - // check if the monitor deployment already exists. if the deployment does - // exist, also determine if it using pvc storage. - pvcExists := false - deploymentExists := false - - d, err := c.makeDeployment(m, false) - if err != nil { - return err - } - - // Set the deployment hash as an annotation - err = patch.DefaultAnnotator.SetLastAppliedAnnotation(d) - if err != nil { - return errors.Wrapf(err, "failed to set annotation for deployment %q", d.Name) - } - - existingDeployment, err := c.context.Clientset.AppsV1().Deployments(c.Namespace).Get(ctx, d.Name, metav1.GetOptions{}) - if err == nil { - deploymentExists = true - pvcExists = controller.DaemonVolumesContainsPVC(existingDeployment.Spec.Template.Spec.Volumes) - } else if !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get mon deployment %s", d.Name) - } - - // persistent storage is not altered after the deployment is created. this - // means we need to be careful when updating the deployment to avoid new - // changes to the crd to change an existing pod's persistent storage. the - // deployment spec created above does not specify persistent storage. here - // we add in PVC or HostPath storage based on an existing deployment OR on - // the current state of the CRD. - if pvcExists || (!deploymentExists && c.monVolumeClaimTemplate(m) != nil) { - pvcName := m.ResourceName - d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, controller.DaemonVolumesDataPVC(pvcName)) - controller.AddVolumeMountSubPath(&d.Spec.Template.Spec, "ceph-daemon-data") - logger.Debugf("adding pvc volume source %s to mon deployment %s", pvcName, d.Name) - } else { - d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, controller.DaemonVolumesDataHostPath(m.DataPathMap)...) - logger.Debugf("adding host path volume source to mon deployment %s", d.Name) - } - - // placement settings from the CRD - var zone string - if schedule != nil { - zone = schedule.Zone - } - p := c.getMonPlacement(zone) - - p.ApplyToPodSpec(&d.Spec.Template.Spec) - if deploymentExists { - // the existing deployment may have a node selector. if the cluster - // isn't using host networking and the deployment is using pvc storage, - // then the node selector can be removed. this may happen after - // upgrading the cluster with the k8s scheduling support for monitors. 
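The startMon comment above lays out when a mon deployment gets a node selector: always with host networking (the mon's IP is tied to its identity), with host-path storage so the data stays with the node, and not when the mon is PVC-backed and k8s can schedule it freely. That decision table condenses to a tiny predicate; the function below is an explanatory sketch, not the removed code.

```go
package main

import "fmt"

// useNodeSelector condenses the scheduling scenarios from the startMon comment:
// host networking or host-path storage pins the mon, PVC storage does not.
func useNodeSelector(hostNetwork, usesPVC bool) bool {
	if hostNetwork {
		return true // the mon's IP is part of its identity
	}
	return !usesPVC // host-path data must stay on the chosen node
}

func main() {
	fmt.Println(useNodeSelector(true, true))   // true
	fmt.Println(useNodeSelector(false, true))  // false
	fmt.Println(useNodeSelector(false, false)) // true
}
```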
- if c.spec.Network.IsHost() || !pvcExists { - p.PodAffinity = nil - p.PodAntiAffinity = nil - k8sutil.SetNodeAntiAffinityForPod(&d.Spec.Template.Spec, requiredDuringScheduling(&c.spec), v1.LabelHostname, - map[string]string{k8sutil.AppAttr: AppName}, existingDeployment.Spec.Template.Spec.NodeSelector) - } else { - k8sutil.SetNodeAntiAffinityForPod(&d.Spec.Template.Spec, requiredDuringScheduling(&c.spec), v1.LabelHostname, - map[string]string{k8sutil.AppAttr: AppName}, nil) - } - return c.updateMon(m, d) - } - - monVolumeClaim := c.monVolumeClaimTemplate(m) - if monVolumeClaim != nil { - pvc, err := c.makeDeploymentPVC(m, false) - if err != nil { - return errors.Wrapf(err, "failed to make mon %s pvc", d.Name) - } - _, err = c.context.Clientset.CoreV1().PersistentVolumeClaims(c.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) - if err != nil { - if kerrors.IsAlreadyExists(err) { - logger.Debugf("cannot create mon %s pvc %s: already exists.", d.Name, pvc.Name) - } else { - return errors.Wrapf(err, "failed to create mon %s pvc %s", d.Name, pvc.Name) - } - } - } - - var nodeSelector map[string]string - if schedule == nil || (monVolumeClaim != nil && zone != "") { - // Schedule the mon according to placement settings, and allow it to be portable among nodes if allowed by the PV - nodeSelector = nil - } else { - // Schedule the mon on a specific host if specified, or else allow it to be portable according to the PV - p.PodAffinity = nil - p.PodAntiAffinity = nil - nodeSelector = map[string]string{v1.LabelHostname: schedule.Hostname} - } - k8sutil.SetNodeAntiAffinityForPod(&d.Spec.Template.Spec, requiredDuringScheduling(&c.spec), v1.LabelHostname, - map[string]string{k8sutil.AppAttr: AppName}, nodeSelector) - - logger.Debugf("Starting mon: %+v", d.Name) - _, err = c.context.Clientset.AppsV1().Deployments(c.Namespace).Create(ctx, d, metav1.CreateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to create mon deployment %s", d.Name) - } - - // Commit the maxMonID after a mon deployment has been started (and not just scheduled) - if err := c.commitMaxMonID(m.DaemonName); err != nil { - return errors.Wrapf(err, "failed to commit maxMonId after starting mon %q", m.DaemonName) - } - - // Persist the expected list of mons to the configmap in case the operator is interrupted before the mon failover is completed - // The config on disk won't be updated until the mon failover is completed - if err := c.persistExpectedMonDaemons(); err != nil { - return errors.Wrap(err, "failed to persist expected mon daemons") - } - - return nil -} - -func waitForQuorumWithMons(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, mons []string, sleepTime int, requireAllInQuorum bool) error { - logger.Infof("waiting for mon quorum with %v", mons) - - // wait for monitors to establish quorum - retryCount := 0 - retryMax := 30 - for { - retryCount++ - if retryCount > retryMax { - return errors.New("exceeded max retry count waiting for monitors to reach quorum") - } - - if retryCount > 1 { - // only sleep after the first time - <-time.After(time.Duration(sleepTime) * time.Second) - } - - // wait for the mon pods to be running - allPodsRunning := true - var runningMonNames []string - for _, m := range mons { - running, err := k8sutil.PodsRunningWithLabel(context.Clientset, clusterInfo.Namespace, fmt.Sprintf("app=%s,mon=%s", AppName, m)) - if err != nil { - logger.Infof("failed to query mon pod status, trying again. 
%v", err) - continue - } - if running > 0 { - runningMonNames = append(runningMonNames, m) - } else { - allPodsRunning = false - logger.Infof("mon %s is not yet running", m) - } - } - - logger.Infof("mons running: %v", runningMonNames) - if !allPodsRunning && requireAllInQuorum { - continue - } - - // get the quorum_status response that contains info about all monitors in the mon map and - // their quorum status - monQuorumStatusResp, err := cephclient.GetMonQuorumStatus(context, clusterInfo) - if err != nil { - logger.Debugf("failed to get quorum_status. %v", err) - continue - } - - if !requireAllInQuorum { - logQuorumMembers(monQuorumStatusResp) - break - } - - // check if each of the initial monitors is in quorum - allInQuorum := true - for _, name := range mons { - if !monFoundInQuorum(name, monQuorumStatusResp) { - // found an initial monitor that is not in quorum, bail out of this retry - logger.Warningf("monitor %s is not in quorum list", name) - allInQuorum = false - break - } - } - - if allInQuorum { - logQuorumMembers(monQuorumStatusResp) - break - } - } - - return nil -} - -func logQuorumMembers(monQuorumStatusResp cephclient.MonStatusResponse) { - var monsInQuorum []string - for _, m := range monQuorumStatusResp.MonMap.Mons { - if monFoundInQuorum(m.Name, monQuorumStatusResp) { - monsInQuorum = append(monsInQuorum, m.Name) - } - } - logger.Infof("Monitors in quorum: %v", monsInQuorum) -} - -func monFoundInQuorum(name string, monQuorumStatusResp cephclient.MonStatusResponse) bool { - // first get the initial monitors corresponding mon map entry - var monMapEntry *cephclient.MonMapEntry - for i := range monQuorumStatusResp.MonMap.Mons { - if name == monQuorumStatusResp.MonMap.Mons[i].Name { - monMapEntry = &monQuorumStatusResp.MonMap.Mons[i] - break - } - } - - if monMapEntry == nil { - // found an initial monitor that is not in the mon map, bail out of this retry - logger.Warningf("failed to find initial monitor %s in mon map", name) - return false - } - - // using the current initial monitor's mon map entry, check to see if it's in the quorum list - // (a list of monitor rank values) - for _, q := range monQuorumStatusResp.Quorum { - if monMapEntry.Rank == q { - return true - } - } - - return false -} - -func requiredDuringScheduling(spec *cephv1.ClusterSpec) bool { - return spec.Network.IsHost() || !spec.Mon.AllowMultiplePerNode -} - -func (c *Cluster) acquireOrchestrationLock() { - logger.Debugf("Acquiring lock for mon orchestration") - c.orchestrationMutex.Lock() - logger.Debugf("Acquired lock for mon orchestration") -} - -func (c *Cluster) releaseOrchestrationLock() { - c.orchestrationMutex.Unlock() - logger.Debugf("Released lock for mon orchestration") -} diff --git a/pkg/operator/ceph/cluster/mon/mon_test.go b/pkg/operator/ceph/cluster/mon/mon_test.go deleted file mode 100644 index 08bd62a6c..000000000 --- a/pkg/operator/ceph/cluster/mon/mon_test.go +++ /dev/null @@ -1,709 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path" - "reflect" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - "github.com/rook/rook/pkg/operator/ceph/config" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "github.com/tevino/abool" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// generate a standard mon config from a mon id w/ default port and IP 2.4.6.{1,2,3,...} -// support mon ID as new ["a", "b", etc.] form or as legacy ["mon0", "mon1", etc.] form -func testGenMonConfig(monID string) *monConfig { - var moniker string - var index int - var err error - if strings.HasPrefix(monID, "mon") { // is legacy mon name - moniker = monID // keep legacy "mon#" name - index, err = strconv.Atoi(strings.Replace(monID, "mon", "", 1)) // get # off end of mon# - } else { - moniker = "mon-" + monID - index, err = k8sutil.NameToIndex(monID) - } - if err != nil { - panic(err) - } - return &monConfig{ - ResourceName: "rook-ceph-" + moniker, // rook-ceph-mon-A or rook-ceph-mon# - DaemonName: monID, // A or mon# - Port: DefaultMsgr1Port, - PublicIP: fmt.Sprintf("2.4.6.%d", index+1), - // dataDirHostPath assumed to be /var/lib/rook - DataPathMap: config.NewStatefulDaemonDataPathMap( - "/var/lib/rook", dataDirRelativeHostPath(monID), config.MonType, monID, "rook-ceph"), - } -} - -func newTestStartCluster(t *testing.T, namespace string) (*clusterd.Context, error) { - monResponse := func() (string, error) { - return clienttest.MonInQuorumResponseMany(3), nil - } - return newTestStartClusterWithQuorumResponse(t, namespace, monResponse) -} - -func newTestStartClusterWithQuorumResponse(t *testing.T, namespace string, monResponse func() (string, error)) (*clusterd.Context, error) { - clientset := test.New(t, 3) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, namespace)) - return "", errors.Wrap(err, "failed testing of start cluster without quorum response") - } else { - return monResponse() - } - }, - } - return &clusterd.Context{ - Clientset: clientset, - Executor: executor, - ConfigDir: configDir, - RequestCancelOrchestration: abool.New(), - }, nil -} - -func newCluster(context *clusterd.Context, namespace string, allowMultiplePerNode bool, resources v1.ResourceRequirements) *Cluster { - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - return &Cluster{ - ClusterInfo: nil, - context: context, - Namespace: namespace, - rookVersion: "myversion", - spec: cephv1.ClusterSpec{ - Mon: cephv1.MonSpec{ - Count: 3, - AllowMultiplePerNode: allowMultiplePerNode, - }, - Resources: map[string]v1.ResourceRequirements{"mon": resources}, - }, - maxMonID: -1, - waitForStart: false, - monTimeoutList: map[string]time.Time{}, - mapping: &Mapping{ - Schedule: map[string]*MonScheduleInfo{}, - }, - ownerInfo: ownerInfo, - } -} - -// setCommonMonProperties is a convenience 
helper for setting common test properties -func setCommonMonProperties(c *Cluster, currentMons int, mon cephv1.MonSpec, rookVersion string) { - c.ClusterInfo = clienttest.CreateTestClusterInfo(currentMons) - c.spec.Mon.Count = mon.Count - c.spec.Mon.AllowMultiplePerNode = mon.AllowMultiplePerNode - c.rookVersion = rookVersion -} - -func TestResourceName(t *testing.T) { - assert.Equal(t, "rook-ceph-mon-a", resourceName("rook-ceph-mon-a")) - assert.Equal(t, "rook-ceph-mon123", resourceName("rook-ceph-mon123")) - assert.Equal(t, "rook-ceph-mon-b", resourceName("b")) -} - -func TestStartMonDeployment(t *testing.T) { - ctx := context.TODO() - namespace := "ns" - context, err := newTestStartCluster(t, namespace) - assert.NoError(t, err) - c := newCluster(context, namespace, true, v1.ResourceRequirements{}) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: EndpointConfigMapName}, - Data: map[string]string{"maxMonId": "1"}, - } - _, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Start mon a on a specific node since there is no volumeClaimTemplate - m := &monConfig{ResourceName: "rook-ceph-mon-a", DaemonName: "a", Port: 3300, PublicIP: "1.2.3.4", DataPathMap: &config.DataPathMap{}} - schedule := &MonScheduleInfo{Hostname: "host-a", Zone: "zonea"} - err = c.startMon(m, schedule) - assert.NoError(t, err) - deployment, err := c.context.Clientset.AppsV1().Deployments(c.Namespace).Get(ctx, m.ResourceName, metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, schedule.Hostname, deployment.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"]) - - // Start mon b on any node in a zone since there is a volumeClaimTemplate - m = &monConfig{ResourceName: "rook-ceph-mon-b", DaemonName: "b", Port: 3300, PublicIP: "1.2.3.5", DataPathMap: &config.DataPathMap{}} - schedule = &MonScheduleInfo{Hostname: "host-b", Zone: "zoneb"} - c.spec.Mon.VolumeClaimTemplate = &v1.PersistentVolumeClaim{} - err = c.startMon(m, schedule) - assert.NoError(t, err) - deployment, err = c.context.Clientset.AppsV1().Deployments(c.Namespace).Get(ctx, m.ResourceName, metav1.GetOptions{}) - assert.NoError(t, err) - // no node selector when there is a volumeClaimTemplate and the mon is assigned to a zone - assert.Equal(t, 0, len(deployment.Spec.Template.Spec.NodeSelector)) -} - -func TestStartMonPods(t *testing.T) { - ctx := context.TODO() - namespace := "ns" - context, err := newTestStartCluster(t, namespace) - assert.NoError(t, err) - c := newCluster(context, namespace, true, v1.ResourceRequirements{}) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - - // start a basic cluster - _, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - - validateStart(ctx, t, c) - - // starting again should be a no-op, but still results in an error - _, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - - validateStart(ctx, t, c) -} - -func TestOperatorRestart(t *testing.T) { - ctx := context.TODO() - namespace := "ns" - context, err := newTestStartCluster(t, namespace) - assert.NoError(t, err) - c := newCluster(context, namespace, true, v1.ResourceRequirements{}) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - - // start a basic cluster - info, err := c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - assert.True(t, info.IsInitialized(true)) - - 
validateStart(ctx, t, c) - - c = newCluster(context, namespace, true, v1.ResourceRequirements{}) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - - // starting again should be a no-op, but will not result in an error - info, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - assert.True(t, info.IsInitialized(true)) - - validateStart(ctx, t, c) -} - -// safety check that if hostNetwork is used no changes occur on an operator restart -func TestOperatorRestartHostNetwork(t *testing.T) { - ctx := context.TODO() - namespace := "ns" - context, err := newTestStartCluster(t, namespace) - assert.NoError(t, err) - - // cluster without host networking - c := newCluster(context, namespace, false, v1.ResourceRequirements{}) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - - // start a basic cluster - info, err := c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - assert.True(t, info.IsInitialized(true)) - - validateStart(ctx, t, c) - - // cluster with host networking - c = newCluster(context, namespace, false, v1.ResourceRequirements{}) - c.spec.Network.HostNetwork = true - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - - // starting again should be a no-op, but still results in an error - info, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - assert.True(t, info.IsInitialized(true), info) - - validateStart(ctx, t, c) -} - -func validateStart(ctx context.Context, t *testing.T, c *Cluster) { - s, err := c.context.Clientset.CoreV1().Secrets(c.Namespace).Get(ctx, AppName, metav1.GetOptions{}) - assert.NoError(t, err) // there shouldn't be an error due the secret existing - assert.Equal(t, 4, len(s.Data)) - - // there is only one pod created. 
the other two won't be created since the first one doesn't start - _, err = c.context.Clientset.AppsV1().Deployments(c.Namespace).Get(ctx, "rook-ceph-mon-a", metav1.GetOptions{}) - assert.NoError(t, err) -} - -func TestPersistMons(t *testing.T) { - clientset := test.New(t, 1) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New(&clusterd.Context{Clientset: clientset}, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) - setCommonMonProperties(c, 1, cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, "myversion") - - // Persist mon a - err := c.persistExpectedMonDaemons() - assert.NoError(t, err) - - cm, err := c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(context.TODO(), EndpointConfigMapName, metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "a=1.2.3.1:6789", cm.Data[EndpointDataKey]) - - // Persist mon b, and remove mon a for simply testing the configmap is updated - c.ClusterInfo.Monitors["b"] = &cephclient.MonInfo{Name: "b", Endpoint: "4.5.6.7:3300"} - delete(c.ClusterInfo.Monitors, "a") - err = c.persistExpectedMonDaemons() - assert.NoError(t, err) - - cm, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(context.TODO(), EndpointConfigMapName, metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "b=4.5.6.7:3300", cm.Data[EndpointDataKey]) -} - -func TestSaveMonEndpoints(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 1) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New(&clusterd.Context{Clientset: clientset, ConfigDir: configDir}, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) - setCommonMonProperties(c, 1, cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, "myversion") - - // create the initial config map - err := c.saveMonConfig() - assert.NoError(t, err) - - cm, err := c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(ctx, EndpointConfigMapName, metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "a=1.2.3.1:6789", cm.Data[EndpointDataKey]) - assert.Equal(t, `{"node":{}}`, cm.Data[MappingKey]) - assert.Equal(t, "-1", cm.Data[MaxMonIDKey]) - - // update the config map - c.ClusterInfo.Monitors["a"].Endpoint = "2.3.4.5:6789" - c.maxMonID = 2 - c.mapping.Schedule["a"] = &MonScheduleInfo{ - Name: "node0", - Address: "1.1.1.1", - Hostname: "myhost", - } - err = c.saveMonConfig() - assert.NoError(t, err) - - cm, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(ctx, EndpointConfigMapName, metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "a=2.3.4.5:6789", cm.Data[EndpointDataKey]) - assert.Equal(t, `{"node":{"a":{"Name":"node0","Hostname":"myhost","Address":"1.1.1.1"}}}`, cm.Data[MappingKey]) - assert.Equal(t, "-1", cm.Data[MaxMonIDKey]) - - // Update the maxMonID to some random value - cm.Data[MaxMonIDKey] = "23" - _, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Update(ctx, cm, metav1.UpdateOptions{}) - assert.NoError(t, err) - // Confirm the maxMonId will be persisted and not updated to anything else. - // The value is only expected to be set directly to the configmap when a mon deployment is started. 
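The maxMonId handling verified here and in TestMaxMonID below follows a simple rule: the committed ID may only grow, and it is written only when a mon deployment has actually been started. A minimal standalone sketch of that pattern against a ConfigMap using the fake clientset; the configmap name, key handling, and helper names are assumptions for illustration, not the mon package's real implementation:

package main

import (
	"context"
	"fmt"
	"strconv"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

const (
	endpointConfigMap = "rook-ceph-mon-endpoints" // assumed name, for illustration only
	maxMonIDKey       = "maxMonId"
)

// monNameToIndex maps "a" -> 0, "b" -> 1, ... like the mon daemon names above.
func monNameToIndex(name string) (int, error) {
	if len(name) != 1 || name[0] < 'a' || name[0] > 'z' {
		return -1, fmt.Errorf("invalid mon name %q", name)
	}
	return int(name[0] - 'a'), nil
}

// commitMaxMonID raises the stored maxMonId only if the started mon has a higher index.
func commitMaxMonID(ctx context.Context, cs kubernetes.Interface, ns, monName string) error {
	index, err := monNameToIndex(monName)
	if err != nil {
		return err
	}
	cm, err := cs.CoreV1().ConfigMaps(ns).Get(ctx, endpointConfigMap, metav1.GetOptions{})
	if err != nil {
		return err
	}
	current, err := strconv.Atoi(cm.Data[maxMonIDKey])
	if err != nil {
		return err
	}
	if index <= current {
		return nil // never lower the committed ID
	}
	cm.Data[maxMonIDKey] = strconv.Itoa(index)
	_, err = cs.CoreV1().ConfigMaps(ns).Update(ctx, cm, metav1.UpdateOptions{})
	return err
}

func main() {
	ctx := context.TODO()
	cs := fake.NewSimpleClientset(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: endpointConfigMap, Namespace: "ns"},
		Data:       map[string]string{maxMonIDKey: "-1"},
	})
	for _, mon := range []string{"a", "d", "c"} { // "c" must not lower the value again
		if err := commitMaxMonID(ctx, cs, "ns", mon); err != nil {
			fmt.Println("error:", err)
		}
	}
	cm, _ := cs.CoreV1().ConfigMaps("ns").Get(ctx, endpointConfigMap, metav1.GetOptions{})
	fmt.Println("maxMonId:", cm.Data[maxMonIDKey]) // expect "3"
}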
- err = c.saveMonConfig() - assert.NoError(t, err) - cm, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(ctx, EndpointConfigMapName, metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "23", cm.Data[MaxMonIDKey]) -} - -func TestMaxMonID(t *testing.T) { - clientset := test.New(t, 1) - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New(&clusterd.Context{Clientset: clientset, ConfigDir: configDir}, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) - - // when the configmap is not found, the maxMonID is -1 - maxMonID, err := c.getStoredMaxMonID() - assert.NoError(t, err) - assert.Equal(t, "-1", maxMonID) - - // initialize the configmap - setCommonMonProperties(c, 1, cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, "myversion") - err = c.saveMonConfig() - assert.NoError(t, err) - - // invalid mon names won't update the maxMonID - err = c.commitMaxMonID("bad-id") - assert.Error(t, err) - - // starting a mon deployment will set the maxMonID - err = c.commitMaxMonID("a") - assert.NoError(t, err) - maxMonID, err = c.getStoredMaxMonID() - assert.NoError(t, err) - assert.Equal(t, "0", maxMonID) - - // set to a higher id - err = c.commitMaxMonID("d") - assert.NoError(t, err) - maxMonID, err = c.getStoredMaxMonID() - assert.NoError(t, err) - assert.Equal(t, "3", maxMonID) - - // setting to an id lower than the max will not update it - err = c.commitMaxMonID("c") - assert.NoError(t, err) - maxMonID, err = c.getStoredMaxMonID() - assert.NoError(t, err) - assert.Equal(t, "3", maxMonID) -} - -func TestMonInQuorum(t *testing.T) { - entry := cephclient.MonMapEntry{Name: "foo", Rank: 23} - quorum := []int{} - // Nothing in quorum - assert.False(t, monInQuorum(entry, quorum)) - - // One or more members in quorum - quorum = []int{23} - assert.True(t, monInQuorum(entry, quorum)) - quorum = []int{5, 6, 7, 23, 8} - assert.True(t, monInQuorum(entry, quorum)) - - // Not in quorum - entry.Rank = 1 - assert.False(t, monInQuorum(entry, quorum)) -} - -func TestNameToIndex(t *testing.T) { - // invalid - id, err := fullNameToIndex("rook-ceph-monitor0") - assert.NotNil(t, err) - assert.Equal(t, -1, id) - id, err = fullNameToIndex("rook-ceph-mon123") - assert.NotNil(t, err) - assert.Equal(t, -1, id) - - // valid - id, err = fullNameToIndex("b") - assert.NoError(t, err) - assert.Equal(t, 1, id) - id, err = fullNameToIndex("m") - assert.NoError(t, err) - assert.Equal(t, 12, id) - id, err = fullNameToIndex("rook-ceph-mon-a") - assert.NoError(t, err) - assert.Equal(t, 0, id) -} - -func TestWaitForQuorum(t *testing.T) { - namespace := "ns" - quorumChecks := 0 - quorumResponse := func() (string, error) { - mons := map[string]*cephclient.MonInfo{ - "a": {}, - } - quorumChecks++ - if quorumChecks == 1 { - // return an error the first time while we're waiting for the mon to join quorum - return "", errors.New("test error") - } - // a successful response indicates that we have quorum, even if we didn't check which specific mons were in quorum - return clienttest.MonInQuorumResponseFromMons(mons), nil - } - context, err := newTestStartClusterWithQuorumResponse(t, namespace, quorumResponse) - assert.NoError(t, err) - requireAllInQuorum := false - expectedMons := []string{"a"} - clusterInfo := &cephclient.ClusterInfo{Namespace: namespace} - err = waitForQuorumWithMons(context, clusterInfo, expectedMons, 0, requireAllInQuorum) - assert.NoError(t, err) -} - -func TestMonFoundInQuorum(t *testing.T) { - response := 
cephclient.MonStatusResponse{} - - // "a" is in quorum - response.Quorum = []int{0} - response.MonMap.Mons = []cephclient.MonMapEntry{ - {Name: "a", Rank: 0}, - {Name: "b", Rank: 1}, - {Name: "c", Rank: 2}, - } - assert.True(t, monFoundInQuorum("a", response)) - assert.False(t, monFoundInQuorum("b", response)) - assert.False(t, monFoundInQuorum("c", response)) - - // b and c also in quorum, but not d - response.Quorum = []int{0, 1, 2} - assert.True(t, monFoundInQuorum("a", response)) - assert.True(t, monFoundInQuorum("b", response)) - assert.True(t, monFoundInQuorum("c", response)) - assert.False(t, monFoundInQuorum("d", response)) -} - -func TestFindAvailableZoneForStretchedMon(t *testing.T) { - c := &Cluster{spec: cephv1.ClusterSpec{ - Mon: cephv1.MonSpec{ - StretchCluster: &cephv1.StretchClusterSpec{ - Zones: []cephv1.StretchClusterZoneSpec{ - {Name: "a", Arbiter: true}, - {Name: "b"}, - {Name: "c"}, - }, - }, - }, - }} - - // No mons are assigned to a zone yet - existingMons := []*monConfig{} - availableZone, err := c.findAvailableZoneIfStretched(existingMons) - assert.NoError(t, err) - assert.NotEqual(t, "", availableZone) - - // With 3 mons, we have one available zone - existingMons = []*monConfig{ - {ResourceName: "x", Zone: "a"}, - {ResourceName: "y", Zone: "b"}, - } - c.spec.Mon.Count = 3 - availableZone, err = c.findAvailableZoneIfStretched(existingMons) - assert.NoError(t, err) - assert.Equal(t, "c", availableZone) - - // With 3 mons and no available zones - existingMons = []*monConfig{ - {ResourceName: "x", Zone: "a"}, - {ResourceName: "y", Zone: "b"}, - {ResourceName: "z", Zone: "c"}, - } - c.spec.Mon.Count = 3 - availableZone, err = c.findAvailableZoneIfStretched(existingMons) - assert.Error(t, err) - assert.Equal(t, "", availableZone) - - // With 5 mons and no available zones - existingMons = []*monConfig{ - {ResourceName: "w", Zone: "a"}, - {ResourceName: "x", Zone: "b"}, - {ResourceName: "y", Zone: "b"}, - {ResourceName: "z", Zone: "c"}, - {ResourceName: "q", Zone: "c"}, - } - c.spec.Mon.Count = 5 - availableZone, err = c.findAvailableZoneIfStretched(existingMons) - assert.Error(t, err) - assert.Equal(t, "", availableZone) - - // With 5 mons and one available zone - existingMons = []*monConfig{ - {ResourceName: "w", Zone: "a"}, - {ResourceName: "x", Zone: "b"}, - {ResourceName: "y", Zone: "b"}, - {ResourceName: "z", Zone: "c"}, - } - availableZone, err = c.findAvailableZoneIfStretched(existingMons) - assert.NoError(t, err) - assert.Equal(t, "c", availableZone) - - // With 5 mons and arbiter zone is available zone - existingMons = []*monConfig{ - {ResourceName: "w", Zone: "b"}, - {ResourceName: "x", Zone: "b"}, - {ResourceName: "y", Zone: "c"}, - {ResourceName: "z", Zone: "c"}, - } - availableZone, err = c.findAvailableZoneIfStretched(existingMons) - assert.NoError(t, err) - assert.Equal(t, "a", availableZone) -} - -func TestStretchMonVolumeClaimTemplate(t *testing.T) { - generalSC := "generalSC" - zoneSC := "zoneSC" - defaultTemplate := &v1.PersistentVolumeClaim{Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &generalSC}} - zoneTemplate := &v1.PersistentVolumeClaim{Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &zoneSC}} - type fields struct { - spec cephv1.ClusterSpec - } - type args struct { - mon *monConfig - } - tests := []struct { - name string - fields fields - args args - want *v1.PersistentVolumeClaim - }{ - {"no template", fields{cephv1.ClusterSpec{}}, args{&monConfig{Zone: "z1"}}, nil}, - {"default template", fields{cephv1.ClusterSpec{Mon: 
cephv1.MonSpec{VolumeClaimTemplate: defaultTemplate}}}, args{&monConfig{Zone: "z1"}}, defaultTemplate}, - {"default template with 3 zones", fields{cephv1.ClusterSpec{Mon: cephv1.MonSpec{ - VolumeClaimTemplate: defaultTemplate, - StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{{Name: "z1"}, {Name: "z2"}, {Name: "z3"}}}}}}, - args{&monConfig{Zone: "z1"}}, - defaultTemplate}, - {"overridden template", fields{cephv1.ClusterSpec{Mon: cephv1.MonSpec{ - VolumeClaimTemplate: defaultTemplate, - StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{{Name: "z1", VolumeClaimTemplate: zoneTemplate}, {Name: "z2"}, {Name: "z3"}}}}}}, - args{&monConfig{Zone: "z1"}}, - zoneTemplate}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Cluster{ - spec: tt.fields.spec, - } - if got := c.monVolumeClaimTemplate(tt.args.mon); !reflect.DeepEqual(got, tt.want) { - t.Errorf("Cluster.monVolumeClaimTemplate() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestArbiterPlacement(t *testing.T) { - placement := cephv1.Placement{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: v1.NodeSelectorOpExists, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - }, - } - c := &Cluster{spec: cephv1.ClusterSpec{ - Mon: cephv1.MonSpec{ - StretchCluster: &cephv1.StretchClusterSpec{ - Zones: []cephv1.StretchClusterZoneSpec{ - {Name: "a", Arbiter: true}, - {Name: "b"}, - {Name: "c"}, - }, - }, - }, - }} - - c.spec.Placement = cephv1.PlacementSpec{} - c.spec.Placement[cephv1.KeyMonArbiter] = placement - - // No placement is found if not requesting the arbiter placement - result := c.getMonPlacement("c") - assert.Equal(t, cephv1.Placement{}, result) - - // Placement is found if requesting the arbiter - result = c.getMonPlacement("a") - assert.Equal(t, placement, result) - - // Arbiter and all mons have the same placement if no arbiter placement is specified - c.spec.Placement = cephv1.PlacementSpec{} - c.spec.Placement[cephv1.KeyMon] = placement - result = c.getMonPlacement("a") - assert.Equal(t, placement, result) - result = c.getMonPlacement("c") - assert.Equal(t, placement, result) -} - -func TestCheckIfArbiterReady(t *testing.T) { - - c := &Cluster{ - Namespace: "ns", - spec: cephv1.ClusterSpec{ - Mon: cephv1.MonSpec{ - StretchCluster: &cephv1.StretchClusterSpec{ - Zones: []cephv1.StretchClusterZoneSpec{ - {Name: "a", Arbiter: true}, - {Name: "b"}, - {Name: "c"}, - }, - }, - }, - }} - crushZoneCount := 0 - balanced := true - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - switch { - case args[0] == "osd" && args[1] == "crush" && args[2] == "dump": - crushBuckets := ` - {"id": -1,"name": "default","type_id": 10,"type_name": "root","weight": 1028}, - {"id": -2,"name": "default~hdd","type_id": 10,"type_name": "root","weight": 1028}, - {"id": -3,"name": "mynode","type_id": 1,"type_name": "host","weight": 1028}, - {"id": -4,"name": "mynode~hdd","type_id": 1,"type_name": "host","weight": 1028}` - for i := 0; i < crushZoneCount; i++ { - weight := 2056 - if !balanced && i%2 == 1 { - // simulate unbalanced with every other zone having half the weight - weight = 1028 - } - crushBuckets = crushBuckets + - fmt.Sprintf(`,{"id": -%d,"name": "zone%d","type_id": 1,"type_name": "zone","weight": %d} - 
,{"id": -%d,"name": "zone%d~ssd","type_id": 1,"type_name": "zone","weight": 2056}`, i+5, i, weight, i+6, i) - } - return fmt.Sprintf(`{"buckets": [%s]}`, crushBuckets), nil - - } - return "", fmt.Errorf("unrecognized output file command: %s %v", command, args) - }, - } - c.context = &clusterd.Context{Clientset: test.New(t, 5), Executor: executor} - c.ClusterInfo = clienttest.CreateTestClusterInfo(5) - - // Not ready if no pods running - ready, err := c.readyToConfigureArbiter(true) - assert.False(t, ready) - assert.NoError(t, err) - - // For the remainder of tests, skip checking OSD pods - // Now there are not enough zones - ready, err = c.readyToConfigureArbiter(false) - assert.False(t, ready) - assert.NoError(t, err) - - // Valid - crushZoneCount = 2 - ready, err = c.readyToConfigureArbiter(false) - assert.True(t, ready) - assert.NoError(t, err) - - // Valid, except the CRUSH map is not balanced - balanced = false - ready, err = c.readyToConfigureArbiter(false) - assert.False(t, ready) - assert.NoError(t, err) - - // Too many zones in the CRUSH map - crushZoneCount = 3 - balanced = true - ready, err = c.readyToConfigureArbiter(false) - assert.False(t, ready) - assert.Error(t, err) -} diff --git a/pkg/operator/ceph/cluster/mon/node.go b/pkg/operator/ceph/cluster/mon/node.go deleted file mode 100644 index 595b37e25..000000000 --- a/pkg/operator/ceph/cluster/mon/node.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" -) - -func getNodeInfoFromNode(n v1.Node) (*MonScheduleInfo, error) { - nr := &MonScheduleInfo{ - Name: n.Name, - Hostname: n.Labels[v1.LabelHostname], - } - - for _, ip := range n.Status.Addresses { - if ip.Type == v1.NodeInternalIP { - logger.Debugf("using internal IP %s for node %s", ip.Address, n.Name) - nr.Address = ip.Address - break - } - } - if nr.Address == "" { - return nil, errors.Errorf("failed to find any internal IP on node %s", nr.Name) - } - return nr, nil -} diff --git a/pkg/operator/ceph/cluster/mon/node_test.go b/pkg/operator/ceph/cluster/mon/node_test.go deleted file mode 100644 index ec8eeb296..000000000 --- a/pkg/operator/ceph/cluster/mon/node_test.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "context" - "strings" - "sync" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - "github.com/rook/rook/pkg/clusterd" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestNodeAffinity(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 4) - c := New(&clusterd.Context{Clientset: clientset}, "ns", cephv1.ClusterSpec{}, &k8sutil.OwnerInfo{}, &sync.Mutex{}) - setCommonMonProperties(c, 0, cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, "myversion") - - c.spec.Placement = map[rook.KeyType]cephv1.Placement{} - c.spec.Placement[cephv1.KeyMon] = cephv1.Placement{NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "label", - Operator: v1.NodeSelectorOpIn, - Values: []string{"bar", "baz"}, - }, - }, - }, - }, - }, - }, - } - - // label nodes so they appear as not scheduable / invalid - node, _ := clientset.CoreV1().Nodes().Get(ctx, "node0", metav1.GetOptions{}) - node.Labels = map[string]string{"label": "foo"} - _, err := clientset.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) - assert.NoError(t, err) - - node, _ = clientset.CoreV1().Nodes().Get(ctx, "node1", metav1.GetOptions{}) - node.Labels = map[string]string{"label": "bar"} - _, err = clientset.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) - assert.NoError(t, err) - - node, _ = clientset.CoreV1().Nodes().Get(ctx, "node2", metav1.GetOptions{}) - node.Labels = map[string]string{"label": "baz"} - _, err = clientset.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) - assert.NoError(t, err) -} - -// this tests can 3 mons with hostnetworking on the same host is rejected -func TestHostNetworkSameNode(t *testing.T) { - namespace := "ns" - context, err := newTestStartCluster(t, namespace) - assert.NoError(t, err) - // cluster host networking - c := newCluster(context, namespace, true, v1.ResourceRequirements{}) - c.spec.Network.HostNetwork = true - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - - // start a basic cluster - _, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.Error(t, err) -} - -func TestPodMemory(t *testing.T) { - namespace := "ns" - context, err := newTestStartCluster(t, namespace) - assert.NoError(t, err) - // Test memory limit alone - r := v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(536870912, resource.BinarySI), // size in Bytes - }, - } - - c := newCluster(context, namespace, true, r) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - // start a basic cluster - _, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - - // Test REQUEST == LIMIT - r = v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(536870912, resource.BinarySI), // size in Bytes - }, - Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(536870912, resource.BinarySI), // size in Bytes - }, - } - - c = newCluster(context, namespace, true, r) - 
c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - // start a basic cluster - _, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - - // Test LIMIT != REQUEST but obviously LIMIT > REQUEST - r = v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(536870912, resource.BinarySI), // size in Bytes - }, - Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(236870912, resource.BinarySI), // size in Bytes - }, - } - - c = newCluster(context, namespace, true, r) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - // start a basic cluster - _, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - - // Test valid case where pod resource is set appropriately - r = v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(1073741824, resource.BinarySI), // size in Bytes - }, - Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(236870912, resource.BinarySI), // size in Bytes - }, - } - - c = newCluster(context, namespace, true, r) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - // start a basic cluster - _, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - - // Test no resources were specified on the pod - r = v1.ResourceRequirements{} - c = newCluster(context, namespace, true, r) - c.ClusterInfo = clienttest.CreateTestClusterInfo(1) - // start a basic cluster - _, err = c.Start(c.ClusterInfo, c.rookVersion, cephver.Nautilus, c.spec) - assert.NoError(t, err) - -} - -func TestHostNetwork(t *testing.T) { - clientset := test.New(t, 3) - c := New(&clusterd.Context{Clientset: clientset}, "ns", cephv1.ClusterSpec{}, &k8sutil.OwnerInfo{}, &sync.Mutex{}) - setCommonMonProperties(c, 0, cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, "myversion") - - c.spec.Network.HostNetwork = true - - monConfig := testGenMonConfig("c") - pod, err := c.makeMonPod(monConfig, false) - assert.NoError(t, err) - assert.NotNil(t, pod) - assert.Equal(t, true, pod.Spec.HostNetwork) - assert.Equal(t, v1.DNSClusterFirstWithHostNet, pod.Spec.DNSPolicy) - val, message := extractArgValue(pod.Spec.Containers[0].Args, "--public-addr") - assert.Equal(t, "2.4.6.3", val, message) - val, message = extractArgValue(pod.Spec.Containers[0].Args, "--public-bind-addr") - assert.Equal(t, "", val) - assert.Equal(t, "arg not found: --public-bind-addr", message) - - monConfig.Port = 6790 - pod, err = c.makeMonPod(monConfig, false) - assert.NoError(t, err) - val, message = extractArgValue(pod.Spec.Containers[0].Args, "--public-addr") - assert.Equal(t, "2.4.6.3:6790", val, message) - assert.NotNil(t, pod) -} - -func extractArgValue(args []string, name string) (string, string) { - for _, arg := range args { - if strings.Contains(arg, name) { - vals := strings.Split(arg, "=") - if len(vals) != 2 { - return "", "cannot split arg: " + arg - } - return vals[1], "value: " + vals[1] - } - } - return "", "arg not found: " + name -} - -func TestGetNodeInfoFromNode(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 1) - node, err := clientset.CoreV1().Nodes().Get(ctx, "node0", metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, node) - - node.Status = v1.NodeStatus{} - node.Status.Addresses = []v1.NodeAddress{ - { - Type: v1.NodeExternalIP, - Address: "1.1.1.1", - }, - } - - var info *MonScheduleInfo - _, err = getNodeInfoFromNode(*node) - assert.NotNil(t, err) - - 
node.Status.Addresses[0].Type = v1.NodeInternalIP - node.Status.Addresses[0].Address = "172.17.0.1" - info, err = getNodeInfoFromNode(*node) - assert.NoError(t, err) - assert.Equal(t, "172.17.0.1", info.Address) -} diff --git a/pkg/operator/ceph/cluster/mon/predicate.go b/pkg/operator/ceph/cluster/mon/predicate.go deleted file mode 100644 index b858973ce..000000000 --- a/pkg/operator/ceph/cluster/mon/predicate.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "encoding/json" - "reflect" - "sort" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" -) - -func PredicateMonEndpointChanges() predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - UpdateFunc: func(e event.UpdateEvent) bool { - cmNew, ok := e.ObjectNew.(*corev1.ConfigMap) - if !ok { - return false - } - cmOld, ok := e.ObjectOld.(*corev1.ConfigMap) - if !ok { - return false - } - if cmNew.GetName() == EndpointConfigMapName { - if wereMonEndpointsUpdated(cmOld.Data, cmNew.Data) { - logger.Info("monitor endpoints changed, updating the bootstrap peer token") - return true - } - } - return false - }, - } -} - -func wereMonEndpointsUpdated(oldCMData, newCMData map[string]string) bool { - // Check the mapping key first - if oldMapping, ok := oldCMData["mapping"]; ok { - if newMapping, ok := newCMData["mapping"]; ok { - // Unmarshal both into a type - var oldMappingToGo Mapping - err := json.Unmarshal([]byte(oldMapping), &oldMappingToGo) - if err != nil { - logger.Debugf("failed to unmarshal new. %v", err) - return false - } - - var newMappingToGo Mapping - err = json.Unmarshal([]byte(newMapping), &newMappingToGo) - if err != nil { - logger.Debugf("failed to unmarshal new. 
%v", err) - return false - } - - // If length is different, monitors are different - if len(oldMappingToGo.Schedule) != len(newMappingToGo.Schedule) { - logger.Debugf("mons were added or removed from the endpoints cm") - return true - } - // Since Schedule is map, it's unordered, so let's order it - oldKeys := make([]string, 0, len(oldMappingToGo.Schedule)) - for k := range oldMappingToGo.Schedule { - oldKeys = append(oldKeys, k) - } - sort.Strings(oldKeys) - - newKeys := make([]string, 0, len(newMappingToGo.Schedule)) - for k := range oldMappingToGo.Schedule { - newKeys = append(newKeys, k) - } - sort.Strings(newKeys) - - // Iterate over the map and compare the values - for _, v := range oldKeys { - if !reflect.DeepEqual(oldMappingToGo.Schedule[v], newMappingToGo.Schedule[v]) { - logger.Debugf("oldMappingToGo.Schedule[v] AND newMappingToGo.Schedule[v]: %v | %v", oldMappingToGo.Schedule[v], newMappingToGo.Schedule[v]) - return true - } - } - } - } - - return false -} diff --git a/pkg/operator/ceph/cluster/mon/predicate_test.go b/pkg/operator/ceph/cluster/mon/predicate_test.go deleted file mode 100644 index 505ca77be..000000000 --- a/pkg/operator/ceph/cluster/mon/predicate_test.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import "testing" - -func TestWereMonEndpointsUpdated(t *testing.T) { - type args struct { - oldCMData map[string]string - newCMData map[string]string - } - tests := []struct { - name string - args args - want bool - }{ - {"no old mapping key", args{oldCMData: map[string]string{}, newCMData: map[string]string{"mapping": `{"node":{"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"}}}`}}, false}, - {"no new mapping key", args{oldCMData: map[string]string{"mapping": `{"node":{"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"}}}`}, newCMData: map[string]string{}}, false}, - {"identical content", args{oldCMData: map[string]string{"mapping": `{"node":{"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"}}}`}, newCMData: map[string]string{"mapping": `{"node":{"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"}}}`}}, false}, - {"identical content but different order", args{oldCMData: map[string]string{"mapping": `{"node":{"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"}}}`}, newCMData: map[string]string{"mapping": `{"node":{"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"}}}`}}, false}, - {"same length but different mons IP for 'i'", args{oldCMData: map[string]string{"mapping": `{"node":{"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"}}}`}, newCMData: map[string]string{"mapping": `{"node":{"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.187"}}}`}}, true}, - {"different length", args{oldCMData: map[string]string{"mapping": `{"node":{"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"}}}`}, newCMData: map[string]string{"mapping": `{"node":{"g":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"h":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.121"},"i":{"Name":"minikube","Hostname":"minikube","Address":"192.168.39.187"}}}`}}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := wereMonEndpointsUpdated(tt.args.oldCMData, tt.args.newCMData); got != tt.want { - t.Errorf("whereMonEndpointsUpdated() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/operator/ceph/cluster/mon/service.go 
b/pkg/operator/ceph/cluster/mon/service.go deleted file mode 100644 index acfde8ff0..000000000 --- a/pkg/operator/ceph/cluster/mon/service.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "context" - "strconv" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func (c *Cluster) createService(mon *monConfig) (string, error) { - ctx := context.TODO() - svcDef := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: mon.ResourceName, - Namespace: c.Namespace, - Labels: c.getLabels(mon, false, true), - }, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: "tcp-msgr1", - Port: mon.Port, - // --public-bind-addr=IP with no IP:port has the mon listen on port 6789 - // regardless of what port the mon advertises (--public-addr) to the outside. - TargetPort: intstr.FromInt(int(DefaultMsgr1Port)), - Protocol: v1.ProtocolTCP, - }, - }, - Selector: c.getLabels(mon, false, false), - }, - } - err := c.ownerInfo.SetOwnerReference(svcDef) - if err != nil { - return "", errors.Wrapf(err, "failed to set owner reference to mon service %q", svcDef.Name) - } - - // If deploying Nautilus or newer we need a new port for the monitor service - addServicePort(svcDef, "tcp-msgr2", DefaultMsgr2Port) - - // Set the ClusterIP if the service does not exist and we expect a certain cluster IP - // For example, in disaster recovery the service might have been deleted accidentally, but we have the - // expected endpoint from the mon configmap. - if mon.PublicIP != "" { - _, err := c.context.Clientset.CoreV1().Services(c.Namespace).Get(ctx, svcDef.Name, metav1.GetOptions{}) - if err != nil && kerrors.IsNotFound(err) { - logger.Infof("ensuring the clusterIP for mon %q is %q", mon.DaemonName, mon.PublicIP) - svcDef.Spec.ClusterIP = mon.PublicIP - } - } - - s, err := k8sutil.CreateOrUpdateService(c.context.Clientset, c.Namespace, svcDef) - if err != nil { - return "", errors.Wrapf(err, "failed to create service for mon %s", mon.DaemonName) - } - - if s == nil { - logger.Errorf("service ip not found for mon %q. 
if this is not a unit test, this is an error", mon.ResourceName) - return "", nil - } - - // mon endpoint are not actually like, they remain with the mgrs1 format - // however it's interesting to show that monitors can be addressed via 2 different ports - // in the end the service has msgr1 and msgr2 ports configured so it's not entirely wrong - logger.Infof("mon %q endpoint is [v2:%s:%s,v1:%s:%d]", mon.DaemonName, s.Spec.ClusterIP, strconv.Itoa(int(DefaultMsgr2Port)), s.Spec.ClusterIP, mon.Port) - - return s.Spec.ClusterIP, nil -} diff --git a/pkg/operator/ceph/cluster/mon/service_test.go b/pkg/operator/ceph/cluster/mon/service_test.go deleted file mode 100644 index 95f525a8d..000000000 --- a/pkg/operator/ceph/cluster/mon/service_test.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "context" - "sync" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestCreateService(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 1) - c := New(&clusterd.Context{Clientset: clientset}, "ns", cephv1.ClusterSpec{}, &k8sutil.OwnerInfo{}, &sync.Mutex{}) - m := &monConfig{ResourceName: "rook-ceph-mon-b", DaemonName: "b"} - clusterIP, err := c.createService(m) - assert.NoError(t, err) - // the clusterIP will not be set in a mock service - assert.Equal(t, "", clusterIP) - - m.PublicIP = "1.2.3.4" - clusterIP, err = c.createService(m) - assert.NoError(t, err) - // the clusterIP will not be set in the mock because the service already exists - assert.Equal(t, "", clusterIP) - - // delete the service to mock a disaster recovery scenario - err = clientset.CoreV1().Services(c.Namespace).Delete(ctx, m.ResourceName, metav1.DeleteOptions{}) - assert.NoError(t, err) - - clusterIP, err = c.createService(m) - assert.NoError(t, err) - // the clusterIP will now be set to the expected value - assert.Equal(t, m.PublicIP, clusterIP) -} diff --git a/pkg/operator/ceph/cluster/mon/spec.go b/pkg/operator/ceph/cluster/mon/spec.go deleted file mode 100644 index 5bdbbc91e..000000000 --- a/pkg/operator/ceph/cluster/mon/spec.go +++ /dev/null @@ -1,385 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "fmt" - "path" - "strings" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // Full path of the command used to invoke the Ceph mon daemon - cephMonCommand = "ceph-mon" -) - -func (c *Cluster) getLabels(monConfig *monConfig, canary, includeNewLabels bool) map[string]string { - // Mons have a service for each mon, so the additional pod data is relevant for its services - // Use pod labels to keep "mon: id" for legacy - labels := controller.CephDaemonAppLabels(AppName, c.Namespace, "mon", monConfig.DaemonName, includeNewLabels) - // Add "mon_cluster: " for legacy - labels[monClusterAttr] = c.Namespace - if canary { - labels["mon_canary"] = "true" - } - if includeNewLabels { - monVolumeClaimTemplate := c.monVolumeClaimTemplate(monConfig) - if monVolumeClaimTemplate != nil { - size := monVolumeClaimTemplate.Spec.Resources.Requests[v1.ResourceStorage] - labels["pvc_name"] = monConfig.ResourceName - labels["pvc_size"] = size.String() - } - if monConfig.Zone != "" { - labels["stretch-zone"] = monConfig.Zone - } - } - - return labels -} - -func (c *Cluster) stretchFailureDomainName() string { - label := StretchFailureDomainLabel(c.spec) - index := strings.Index(label, "/") - if index == -1 { - return label - } - return label[index+1:] -} - -func StretchFailureDomainLabel(spec cephv1.ClusterSpec) string { - if spec.Mon.StretchCluster.FailureDomainLabel != "" { - return spec.Mon.StretchCluster.FailureDomainLabel - } - // The default topology label is for a zone - return corev1.LabelZoneFailureDomainStable -} - -func (c *Cluster) makeDeployment(monConfig *monConfig, canary bool) (*apps.Deployment, error) { - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: monConfig.ResourceName, - Namespace: c.Namespace, - Labels: c.getLabels(monConfig, canary, true), - }, - } - k8sutil.AddRookVersionLabelToDeployment(d) - cephv1.GetMonAnnotations(c.spec.Annotations).ApplyToObjectMeta(&d.ObjectMeta) - cephv1.GetMonLabels(c.spec.Labels).ApplyToObjectMeta(&d.ObjectMeta) - controller.AddCephVersionLabelToDeployment(c.ClusterInfo.CephVersion, d) - err := c.ownerInfo.SetControllerReference(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to mon deployment %q", d.Name) - } - - pod, err := c.makeMonPod(monConfig, canary) - if err != nil { - return nil, err - } - replicaCount := int32(1) - d.Spec = apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: c.getLabels(monConfig, canary, false), - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: pod.ObjectMeta, - Spec: pod.Spec, - }, - Replicas: &replicaCount, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - } - - return d, nil -} - -func (c *Cluster) makeDeploymentPVC(m *monConfig, canary bool) (*corev1.PersistentVolumeClaim, error) { - template := c.monVolumeClaimTemplate(m) - volumeMode := corev1.PersistentVolumeFilesystem - pvc := &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: m.ResourceName, - Namespace: c.Namespace, - Labels: c.getLabels(m, canary, true), 
- }, - Spec: corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, - }, - Resources: template.Spec.Resources, - StorageClassName: template.Spec.StorageClassName, - VolumeMode: &volumeMode, - }, - } - k8sutil.AddRookVersionLabelToObjectMeta(&pvc.ObjectMeta) - cephv1.GetMonAnnotations(c.spec.Annotations).ApplyToObjectMeta(&pvc.ObjectMeta) - controller.AddCephVersionLabelToObjectMeta(c.ClusterInfo.CephVersion, &pvc.ObjectMeta) - err := c.ownerInfo.SetControllerReference(pvc) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to mon pvc %q", pvc.Name) - } - - // k8s uses limit as the resource request fallback - if _, ok := pvc.Spec.Resources.Limits[corev1.ResourceStorage]; ok { - return pvc, nil - } - - // specific request in the crd - if _, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { - return pvc, nil - } - - req, err := resource.ParseQuantity(cephMonDefaultStorageRequest) - if err != nil { - return nil, err - } - - if pvc.Spec.Resources.Requests == nil { - pvc.Spec.Resources.Requests = corev1.ResourceList{} - } - pvc.Spec.Resources.Requests[corev1.ResourceStorage] = req - - return pvc, nil -} - -func (c *Cluster) makeMonPod(monConfig *monConfig, canary bool) (*corev1.Pod, error) { - logger.Debugf("monConfig: %+v", monConfig) - podSpec := corev1.PodSpec{ - InitContainers: []corev1.Container{ - c.makeChownInitContainer(monConfig), - c.makeMonFSInitContainer(monConfig), - }, - Containers: []corev1.Container{ - c.makeMonDaemonContainer(monConfig), - }, - RestartPolicy: corev1.RestartPolicyAlways, - // we decide later whether to use a PVC volume or host volumes for mons, so only populate - // the base volumes at this point. - Volumes: controller.DaemonVolumesBase(monConfig.DataPathMap, keyringStoreName), - HostNetwork: c.spec.Network.IsHost(), - PriorityClassName: cephv1.GetMonPriorityClassName(c.spec.PriorityClassNames), - } - - // If the log collector is enabled we add the side-car container - if c.spec.LogCollector.Enabled { - shareProcessNamespace := true - podSpec.ShareProcessNamespace = &shareProcessNamespace - podSpec.Containers = append(podSpec.Containers, *controller.LogCollectorContainer(fmt.Sprintf("%s.%s", cephMonCommand, monConfig.DaemonName), c.ClusterInfo.Namespace, c.spec)) - } - - // Replace default unreachable node toleration - if c.monVolumeClaimTemplate(monConfig) != nil { - k8sutil.AddUnreachableNodeToleration(&podSpec) - } - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: monConfig.ResourceName, - Namespace: c.Namespace, - Labels: c.getLabels(monConfig, canary, true), - }, - Spec: podSpec, - } - cephv1.GetMonAnnotations(c.spec.Annotations).ApplyToObjectMeta(&pod.ObjectMeta) - cephv1.GetMonLabels(c.spec.Labels).ApplyToObjectMeta(&pod.ObjectMeta) - - if c.spec.Network.IsHost() { - pod.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet - } else if c.spec.Network.IsMultus() { - if err := k8sutil.ApplyMultus(c.spec.Network, &pod.ObjectMeta); err != nil { - return nil, err - } - } - - if c.spec.IsStretchCluster() { - nodeAffinity, err := k8sutil.GenerateNodeAffinity(fmt.Sprintf("%s=%s", StretchFailureDomainLabel(c.spec), monConfig.Zone)) - if err != nil { - return nil, errors.Wrapf(err, "failed to generate mon %q node affinity", monConfig.DaemonName) - } - pod.Spec.Affinity = &corev1.Affinity{NodeAffinity: nodeAffinity} - } - - return pod, nil -} - -/* - * Container specs - */ - -// Init and daemon containers require the same context, so we call it 'pod' context 
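makeDeploymentPVC above resolves the PVC storage request in a fixed order: an explicit storage limit wins, then an explicit request from the CRD, and only then is a default quantity parsed from cephMonDefaultStorageRequest. A minimal standalone sketch of that fallback; the helper name and the 10Gi default used here are placeholders, not values taken from the Rook code:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// applyDefaultStorageRequest fills in a storage request only when neither a
// storage limit nor a storage request was set on the claim spec.
func applyDefaultStorageRequest(spec *corev1.PersistentVolumeClaimSpec, def resource.Quantity) {
	if _, ok := spec.Resources.Limits[corev1.ResourceStorage]; ok {
		return // kubernetes falls back to the limit as the request
	}
	if _, ok := spec.Resources.Requests[corev1.ResourceStorage]; ok {
		return // request was set explicitly in the CRD
	}
	if spec.Resources.Requests == nil {
		spec.Resources.Requests = corev1.ResourceList{}
	}
	spec.Resources.Requests[corev1.ResourceStorage] = def
}

func main() {
	def := resource.MustParse("10Gi") // placeholder default

	empty := corev1.PersistentVolumeClaimSpec{}
	applyDefaultStorageRequest(&empty, def)
	q := empty.Resources.Requests[corev1.ResourceStorage]
	fmt.Println("no limit or request ->", q.String()) // 10Gi

	explicit := corev1.PersistentVolumeClaimSpec{}
	explicit.Resources.Requests = corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("5Gi")}
	applyDefaultStorageRequest(&explicit, def)
	q = explicit.Resources.Requests[corev1.ResourceStorage]
	fmt.Println("explicit request ->", q.String()) // stays 5Gi
}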
- -func (c *Cluster) makeChownInitContainer(monConfig *monConfig) corev1.Container { - return controller.ChownCephDataDirsInitContainer( - *monConfig.DataPathMap, - c.spec.CephVersion.Image, - controller.DaemonVolumeMounts(monConfig.DataPathMap, keyringStoreName), - cephv1.GetMonResources(c.spec.Resources), - controller.PodSecurityContext(), - ) -} - -func (c *Cluster) makeMonFSInitContainer(monConfig *monConfig) corev1.Container { - return corev1.Container{ - Name: "init-mon-fs", - Command: []string{ - cephMonCommand, - }, - Args: append( - controller.DaemonFlags(c.ClusterInfo, &c.spec, monConfig.DaemonName), - // needed so we can generate an initial monmap - // otherwise the mkfs will say: "0  no local addrs match monmap" - config.NewFlag("public-addr", monConfig.PublicIP), - "--mkfs", - ), - Image: c.spec.CephVersion.Image, - VolumeMounts: controller.DaemonVolumeMounts(monConfig.DataPathMap, keyringStoreName), - SecurityContext: controller.PodSecurityContext(), - // filesystem creation does not require ports to be exposed - Env: controller.DaemonEnvVars(c.spec.CephVersion.Image), - Resources: cephv1.GetMonResources(c.spec.Resources), - } -} - -func (c *Cluster) makeMonDaemonContainer(monConfig *monConfig) corev1.Container { - podIPEnvVar := "ROOK_POD_IP" - publicAddr := monConfig.PublicIP - - // Handle the non-default port for host networking. If host networking is not being used, - // the service created elsewhere will handle the non-default port redirection to the default port inside the container. - if c.spec.Network.IsHost() && monConfig.Port != DefaultMsgr1Port { - logger.Warningf("Starting mon %s with host networking on a non-default port %d. The mon must be failed over before enabling msgr2.", - monConfig.DaemonName, monConfig.Port) - publicAddr = fmt.Sprintf("%s:%d", publicAddr, monConfig.Port) - } - - container := corev1.Container{ - Name: "mon", - Command: []string{ - cephMonCommand, - }, - Args: append( - controller.DaemonFlags(c.ClusterInfo, &c.spec, monConfig.DaemonName), - "--foreground", - // If the mon is already in the monmap, when the port is left off of --public-addr, - // it will still advertise on the previous port b/c monmap is saved to mon database. - config.NewFlag("public-addr", publicAddr), - // Set '--setuser-match-path' so that existing directory owned by root won't affect the daemon startup. 
- // For existing data store owned by root, the daemon will continue to run as root - // - // We use 'store.db' here because during an upgrade the init container will set 'ceph:ceph' to monConfig.DataPathMap.ContainerDataDir - // but inside the permissions will be 'root:root' AND we don't want to chown recursively on the mon data directory - // We want to avoid potential startup time issue if the store is big - config.NewFlag("setuser-match-path", path.Join(monConfig.DataPathMap.ContainerDataDir, "store.db")), - ), - Image: c.spec.CephVersion.Image, - VolumeMounts: controller.DaemonVolumeMounts(monConfig.DataPathMap, keyringStoreName), - SecurityContext: controller.PodSecurityContext(), - Ports: []corev1.ContainerPort{ - { - Name: "tcp-msgr1", - ContainerPort: monConfig.Port, - Protocol: corev1.ProtocolTCP, - }, - }, - Env: append( - controller.DaemonEnvVars(c.spec.CephVersion.Image), - k8sutil.PodIPEnvVar(podIPEnvVar), - ), - Resources: cephv1.GetMonResources(c.spec.Resources), - LivenessProbe: controller.GenerateLivenessProbeExecDaemon(config.MonType, monConfig.DaemonName), - WorkingDir: config.VarLogCephDir, - } - - if monConfig.Zone != "" { - desiredLocation := fmt.Sprintf("%s=%s", c.stretchFailureDomainName(), monConfig.Zone) - container.Args = append(container.Args, []string{"--set-crush-location", desiredLocation}...) - if monConfig.Zone == c.getArbiterZone() { - // remember the arbiter mon to be set later in the reconcile after the OSDs are configured - c.arbiterMon = monConfig.DaemonName - } - } - - // If the liveness probe is enabled - container = config.ConfigureLivenessProbe(cephv1.KeyMon, container, c.spec.HealthCheck) - - // If host networking is enabled, we don't need a bind addr that is different from the public addr - if !c.spec.Network.IsHost() { - // Opposite of the above, --public-bind-addr will *not* still advertise on the previous - // port, which makes sense because this is the pod IP, which changes with every new pod. 
- container.Args = append(container.Args, - config.NewFlag("public-bind-addr", controller.ContainerEnvVarReference(podIPEnvVar))) - } - - // Add messenger 2 port - addContainerPort(container, "tcp-msgr2", 3300) - - return container -} - -// UpdateCephDeploymentAndWait verifies a deployment can be stopped or continued -func UpdateCephDeploymentAndWait(context *clusterd.Context, clusterInfo *client.ClusterInfo, deployment *apps.Deployment, daemonType, daemonName string, skipUpgradeChecks, continueUpgradeAfterChecksEvenIfNotHealthy bool) error { - - callback := func(action string) error { - // At this point, we are in an upgrade - if skipUpgradeChecks { - logger.Warningf("this is a Ceph upgrade, not performing upgrade checks because skipUpgradeChecks is %t", skipUpgradeChecks) - return nil - } - - logger.Infof("checking if we can %s the deployment %s", action, deployment.Name) - - if action == "stop" { - err := client.OkToStop(context, clusterInfo, deployment.Name, daemonType, daemonName) - if err != nil { - if continueUpgradeAfterChecksEvenIfNotHealthy { - logger.Infof("The %s daemon %s is not ok-to-stop but 'continueUpgradeAfterChecksEvenIfNotHealthy' is true, so proceeding to stop...", daemonType, daemonName) - return nil - } - return errors.Wrapf(err, "failed to check if we can %s the deployment %s", action, deployment.Name) - } - } - - if action == "continue" { - err := client.OkToContinue(context, clusterInfo, deployment.Name, daemonType, daemonName) - if err != nil { - if continueUpgradeAfterChecksEvenIfNotHealthy { - logger.Infof("The %s daemon %s is not ok-to-stop but 'continueUpgradeAfterChecksEvenIfNotHealthy' is true, so continuing...", daemonType, daemonName) - return nil - } - return errors.Wrapf(err, "failed to check if we can %s the deployment %s", action, deployment.Name) - } - } - - return nil - } - - err := k8sutil.UpdateDeploymentAndWait(context, deployment, clusterInfo.Namespace, callback) - return err -} diff --git a/pkg/operator/ceph/cluster/mon/spec_test.go b/pkg/operator/ceph/cluster/mon/spec_test.go deleted file mode 100644 index 740a73ec4..000000000 --- a/pkg/operator/ceph/cluster/mon/spec_test.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mon - -import ( - "sync" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/test" - "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" -) - -func TestPodSpecs(t *testing.T) { - testPodSpec(t, "a", true) - testPodSpec(t, "mon0", true) - testPodSpec(t, "a", false) - testPodSpec(t, "mon0", false) -} - -func testPodSpec(t *testing.T, monID string, pvc bool) { - clientset := testop.New(t, 1) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New( - &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook"}, - "ns", - cephv1.ClusterSpec{}, - ownerInfo, - &sync.Mutex{}, - ) - setCommonMonProperties(c, 0, cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, "rook/rook:myversion") - c.spec.CephVersion = cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:myceph"} - c.spec.Resources = map[string]v1.ResourceRequirements{} - c.spec.Resources["mon"] = v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(200.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(1337.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(500.0, resource.BinarySI), - }, - } - c.spec.PriorityClassNames = map[rook.KeyType]string{ - cephv1.KeyMon: "my-priority-class", - } - monConfig := testGenMonConfig(monID) - - d, err := c.makeDeployment(monConfig, false) - assert.NoError(t, err) - assert.NotNil(t, d) - - if pvc { - d.Spec.Template.Spec.Volumes = append( - d.Spec.Template.Spec.Volumes, controller.DaemonVolumesDataPVC("i-am-pvc")) - } else { - d.Spec.Template.Spec.Volumes = append( - d.Spec.Template.Spec.Volumes, controller.DaemonVolumesDataHostPath(monConfig.DataPathMap)...) 
- } - - // Deployment should have Ceph labels - test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MonType, monID, AppName, "ns") - - podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) - podTemplate.RunFullSuite(config.MonType, monID, AppName, "ns", "quay.io/ceph/ceph:myceph", - "200", "100", "1337", "500", /* resources */ - "my-priority-class") -} - -func TestDeploymentPVCSpec(t *testing.T) { - clientset := testop.New(t, 1) - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := New( - &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook"}, - "ns", - cephv1.ClusterSpec{}, - ownerInfo, - &sync.Mutex{}, - ) - setCommonMonProperties(c, 0, cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, "rook/rook:myversion") - c.spec.CephVersion = cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:myceph"} - c.spec.Resources = map[string]v1.ResourceRequirements{} - c.spec.Resources["mon"] = v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(200.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(1337.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(500.0, resource.BinarySI), - }, - } - monConfig := testGenMonConfig("a") - - // configured with default storage request - c.spec.Mon.VolumeClaimTemplate = &v1.PersistentVolumeClaim{} - pvc, err := c.makeDeploymentPVC(monConfig, false) - assert.NoError(t, err) - defaultReq, err := resource.ParseQuantity(cephMonDefaultStorageRequest) - assert.NoError(t, err) - assert.Equal(t, pvc.Spec.Resources.Requests[v1.ResourceStorage], defaultReq) - - // limit is preserved - req, err := resource.ParseQuantity("22Gi") - assert.NoError(t, err) - c.spec.Mon.VolumeClaimTemplate = &v1.PersistentVolumeClaim{ - Spec: v1.PersistentVolumeClaimSpec{ - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{v1.ResourceStorage: req}, - }, - }, - } - pvc, err = c.makeDeploymentPVC(monConfig, false) - assert.NoError(t, err) - assert.Equal(t, pvc.Spec.Resources.Limits[v1.ResourceStorage], req) - - // request is preserved - req, err = resource.ParseQuantity("23Gi") - assert.NoError(t, err) - c.spec.Mon.VolumeClaimTemplate = &v1.PersistentVolumeClaim{ - Spec: v1.PersistentVolumeClaimSpec{ - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceStorage: req}, - }, - }, - } - pvc, err = c.makeDeploymentPVC(monConfig, false) - assert.NoError(t, err) - assert.Equal(t, pvc.Spec.Resources.Requests[v1.ResourceStorage], req) -} - -func testRequiredDuringScheduling(t *testing.T, hostNetwork, allowMultiplePerNode, required bool) { - c := New( - &clusterd.Context{}, - "ns", - cephv1.ClusterSpec{}, - &k8sutil.OwnerInfo{}, - &sync.Mutex{}, - ) - - c.spec.Network.HostNetwork = hostNetwork - c.spec.Mon.AllowMultiplePerNode = allowMultiplePerNode - assert.Equal(t, required, requiredDuringScheduling(&c.spec)) -} - -func TestRequiredDuringScheduling(t *testing.T) { - testRequiredDuringScheduling(t, false, false, true) - testRequiredDuringScheduling(t, true, false, true) - testRequiredDuringScheduling(t, true, true, true) - testRequiredDuringScheduling(t, false, true, false) -} diff --git a/pkg/operator/ceph/cluster/mon/util.go b/pkg/operator/ceph/cluster/mon/util.go deleted file mode 100644 index 50e571607..000000000 --- a/pkg/operator/ceph/cluster/mon/util.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mon - -import ( - "strings" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func monInQuorum(monitor client.MonMapEntry, quorum []int) bool { - for _, rank := range quorum { - if rank == monitor.Rank { - return true - } - } - return false -} - -// convert the mon name to the numeric mon ID -func fullNameToIndex(name string) (int, error) { - // remove the "rook-ceph-mon" prefix - name = strings.TrimPrefix(name, AppName) - // remove the "-" prefix - name = strings.TrimPrefix(name, "-") - return k8sutil.NameToIndex(name) -} - -// addServicePort adds a port to a service -func addServicePort(service *v1.Service, name string, port int32) { - if port == 0 { - return - } - service.Spec.Ports = append(service.Spec.Ports, v1.ServicePort{ - Name: name, - Port: port, - TargetPort: intstr.FromInt(int(port)), - Protocol: v1.ProtocolTCP, - }) -} - -// addContainerPort adds a port to a container -func addContainerPort(container v1.Container, name string, port int32) { - if port == 0 { - return - } - container.Ports = append(container.Ports, v1.ContainerPort{ - Name: name, - ContainerPort: port, - Protocol: v1.ProtocolTCP, - }) -} diff --git a/pkg/operator/ceph/cluster/monitoring.go b/pkg/operator/ceph/cluster/monitoring.go deleted file mode 100644 index 9748b5566..000000000 --- a/pkg/operator/ceph/cluster/monitoring.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. -package cluster - -import ( - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - "github.com/rook/rook/pkg/operator/ceph/object/bucket" -) - -var ( - monitorDaemonList = []string{"mon", "osd", "status"} -) - -func (c *ClusterController) configureCephMonitoring(cluster *cluster, clusterInfo *cephclient.ClusterInfo) { - var isDisabled bool - - for _, daemon := range monitorDaemonList { - // Is the monitoring enabled for that daemon? 
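configureCephMonitoring below reconciles one health-check goroutine per daemon ("mon", "osd", "status") by pairing a monitoringRunning flag with a stop channel: disabling a check in the CR closes the channel, while enabling it starts the goroutine. A self-contained sketch of that start/stop pattern, assuming illustrative names rather than Rook's checker types:

```go
package main

import (
	"fmt"
	"time"
)

// checker owns one background health-check loop.
type checker struct {
	stopChan chan struct{}
	running  bool
}

// ensure starts or stops the loop so it matches the desired state, the same
// enable/disable reconciliation done per daemon in configureCephMonitoring.
func (c *checker) ensure(disabled bool, check func()) {
	switch {
	case c.running && disabled:
		close(c.stopChan) // signal the goroutine to exit
		c.running = false
	case !c.running && !disabled:
		c.stopChan = make(chan struct{})
		c.running = true
		go func(stop <-chan struct{}) {
			for {
				select {
				case <-stop:
					return
				case <-time.After(time.Second):
					check()
				}
			}
		}(c.stopChan)
	}
}

func main() {
	c := &checker{}
	c.ensure(false, func() { fmt.Println("check ceph health") }) // CR enables the check
	time.Sleep(3 * time.Second)
	c.ensure(true, nil) // CR disables the check: close the stop channel
	time.Sleep(time.Second)
}
```

One subtlety the sketch makes explicit: a closed channel can never be reused to signal again, so a fresh stop channel has to be created before a stopped check is restarted.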
- isDisabled = isMonitoringDisabled(daemon, cluster.Spec) - if health, ok := cluster.monitoringChannels[daemon]; ok { - if health.monitoringRunning { - // If the goroutine was running but the CR was updated to stop the monitoring we need to close the channel - if isDisabled { - // close the channel so the goroutine can stop - close(cluster.monitoringChannels[daemon].stopChan) - // Set monitoring to false since it's not running anymore - cluster.monitoringChannels[daemon].monitoringRunning = false - } else { - logger.Debugf("ceph %s health go routine is already running for cluster %q", daemon, cluster.Namespace) - } - } else { - // if not already running and not disabled, we run it - if !isDisabled { - // Run the go routine - c.startMonitoringCheck(cluster, clusterInfo, daemon) - - // Set the flag to indicate monitoring is running - cluster.monitoringChannels[daemon].monitoringRunning = true - } - } - } else { - if !isDisabled { - cluster.monitoringChannels[daemon] = &clusterHealth{ - stopChan: make(chan struct{}), - monitoringRunning: true, // Set the flag to indicate monitoring is running - } - - // Run the go routine - c.startMonitoringCheck(cluster, clusterInfo, daemon) - } - } - } - - // Start watchers - if cluster.watchersActivated { - logger.Debugf("cluster is already being watched by bucket and client provisioner for cluster %q", cluster.Namespace) - return - } - - // Start the object bucket provisioner - bucketProvisioner := bucket.NewProvisioner(c.context, clusterInfo) - // If cluster is external, pass down the user to the bucket controller - - // note: the error return below is ignored and is expected to be removed from the - // bucket library's `NewProvisioner` function - bucketController, _ := bucket.NewBucketController(c.context.KubeConfig, bucketProvisioner) - go func() { - err := bucketController.Run(cluster.stopCh) - if err != nil { - logger.Errorf("failed to run bucket controller. 
%v", err) - } - }() - - // enable the cluster watcher once - cluster.watchersActivated = true -} - -func isMonitoringDisabled(daemon string, clusterSpec *cephv1.ClusterSpec) bool { - switch daemon { - case "mon": - return clusterSpec.HealthCheck.DaemonHealth.Monitor.Disabled - - case "osd": - return clusterSpec.HealthCheck.DaemonHealth.ObjectStorageDaemon.Disabled - - case "status": - return clusterSpec.HealthCheck.DaemonHealth.Status.Disabled - } - - return false -} - -func (c *ClusterController) startMonitoringCheck(cluster *cluster, clusterInfo *cephclient.ClusterInfo, daemon string) { - switch daemon { - case "mon": - healthChecker := mon.NewHealthChecker(cluster.mons) - logger.Infof("enabling ceph %s monitoring goroutine for cluster %q", daemon, cluster.Namespace) - go healthChecker.Check(cluster.monitoringChannels[daemon].stopChan) - - case "osd": - if !cluster.Spec.External.Enable { - c.osdChecker = osd.NewOSDHealthMonitor(c.context, clusterInfo, cluster.Spec.RemoveOSDsIfOutAndSafeToRemove, cluster.Spec.HealthCheck) - logger.Infof("enabling ceph %s monitoring goroutine for cluster %q", daemon, cluster.Namespace) - go c.osdChecker.Start(cluster.monitoringChannels[daemon].stopChan) - } - - case "status": - cephChecker := newCephStatusChecker(c.context, clusterInfo, cluster.Spec) - logger.Infof("enabling ceph %s monitoring goroutine for cluster %q", daemon, cluster.Namespace) - go cephChecker.checkCephStatus(cluster.monitoringChannels[daemon].stopChan) - } -} diff --git a/pkg/operator/ceph/cluster/monitoring_test.go b/pkg/operator/ceph/cluster/monitoring_test.go deleted file mode 100644 index a0eed4ff5..000000000 --- a/pkg/operator/ceph/cluster/monitoring_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. -package cluster - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" -) - -func TestIsMonitoringDisabled(t *testing.T) { - type args struct { - daemon string - clusterSpec *cephv1.ClusterSpec - } - tests := []struct { - name string - args args - want bool - }{ - {"isDisabled", args{"mon", &cephv1.ClusterSpec{}}, false}, - {"isEnabled", args{"mon", &cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{Monitor: cephv1.HealthCheckSpec{Disabled: true}}}}}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := isMonitoringDisabled(tt.args.daemon, tt.args.clusterSpec); got != tt.want { - t.Errorf("isMonitoringEnabled() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/operator/ceph/cluster/operator_watchers.go b/pkg/operator/ceph/cluster/operator_watchers.go deleted file mode 100644 index ec3ca60c7..000000000 --- a/pkg/operator/ceph/cluster/operator_watchers.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. -package cluster - -import ( - "os" - "reflect" - - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" -) - -// StartOperatorSettingsWatch starts the operator settings watcher -func (c *ClusterController) StartOperatorSettingsWatch(stopCh chan struct{}) { - operatorNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - // watch for "rook-ceph-operator-config" ConfigMap - k8sutil.StartOperatorSettingsWatch(c.context, operatorNamespace, opcontroller.OperatorSettingConfigMapName, - c.operatorConfigChange, - func(oldObj, newObj interface{}) { - if reflect.DeepEqual(oldObj, newObj) { - return - } - c.operatorConfigChange(newObj) - }, nil, stopCh) -} - -// StopWatch stop watchers -func (c *ClusterController) StopWatch() { - for _, cluster := range c.clusterMap { - // check channel is open before closing - if !cluster.closedStopCh { - close(cluster.stopCh) - cluster.closedStopCh = true - } - } - c.clusterMap = make(map[string]*cluster) -} - -func (c *ClusterController) operatorConfigChange(obj interface{}) { - cm, ok := obj.(*v1.ConfigMap) - if !ok { - logger.Warningf("Expected ConfigMap but handler received %T. %#v", obj, obj) - return - } - - logger.Infof("ConfigMap %q changes detected. Updating configurations", cm.Name) - for _, callback := range c.operatorConfigCallbacks { - if err := callback(); err != nil { - logger.Errorf("%v", err) - } - } -} diff --git a/pkg/operator/ceph/cluster/osd/config.go b/pkg/operator/ceph/cluster/osd/config.go deleted file mode 100644 index 4ebf86d8a..000000000 --- a/pkg/operator/ceph/cluster/osd/config.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "encoding/base64" - "fmt" - "path" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - v1 "k8s.io/api/core/v1" -) - -const ( - dmCryptKeySize = 128 -) - -// PrivilegedContext returns a privileged Pod security context -func PrivilegedContext() *v1.SecurityContext { - privileged := true - - return &v1.SecurityContext{ - Privileged: &privileged, - } -} - -func osdOnSDNFlag(network cephv1.NetworkSpec) []string { - var args []string - // OSD fails to find the right IP to bind to when running on SDN - // for more details: https://github.com/rook/rook/issues/3140 - if !network.IsHost() { - args = append(args, "--ms-learn-addr-from-peer=false") - } - - return args -} - -func encryptionKeyPath() string { - return path.Join(opconfig.EtcCephDir, encryptionKeyFileName) -} - -func encryptionDMName(pvcName, blockType string) string { - return fmt.Sprintf("%s-%s", pvcName, blockType) -} - -func encryptionDMPath(pvcName, blockType string) string { - return path.Join("/dev/mapper", encryptionDMName(pvcName, blockType)) -} - -func encryptionBlockDestinationCopy(mountPath, blockType string) string { - return path.Join(mountPath, blockType) + "-tmp" -} - -func generateDmCryptKey() (string, error) { - key, err := mgr.GenerateRandomBytes(dmCryptKeySize) - if err != nil { - return "", errors.Wrap(err, "failed to generate random bytes") - } - - return base64.StdEncoding.EncodeToString(key), nil -} - -func (c *Cluster) isCephVolumeRawModeSupported() bool { - if c.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawEncryptionModeMinNautilusCephVersion) && !c.clusterInfo.CephVersion.IsOctopus() { - return true - } - if c.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawEncryptionModeMinOctopusCephVersion) { - return true - } - - return false -} diff --git a/pkg/operator/ceph/cluster/osd/config/config.go b/pkg/operator/ceph/cluster/osd/config/config.go deleted file mode 100644 index bae425afb..000000000 --- a/pkg/operator/ceph/cluster/osd/config/config.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config for OSD config managed by the operator -package config - -import ( - "strconv" -) - -const ( - WalSizeMBKey = "walSizeMB" - DatabaseSizeMBKey = "databaseSizeMB" - JournalSizeMBKey = "journalSizeMB" - OSDsPerDeviceKey = "osdsPerDevice" - EncryptedDeviceKey = "encryptedDevice" - MetadataDeviceKey = "metadataDevice" - DeviceClassKey = "deviceClass" - InitialWeightKey = "initialWeight" - PrimaryAffinityKey = "primaryAffinity" -) - -// StoreConfig represents the configuration of an OSD on a device. 
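isCephVolumeRawModeSupported above gates ceph-volume raw-mode encryption on two separate minimums because the capability reached Nautilus and Octopus through independent backports, so a single floor version is not enough. A sketch of the same check with the Cluster receiver removed; the threshold values are assumptions chosen to match the expectations in the deleted config_test.go further down:

```go
package main

import (
	"fmt"

	cephver "github.com/rook/rook/pkg/operator/ceph/version"
)

// Assumed minimums (not copied from this patch), consistent with the
// nok-14.2.4 / ok-14.2.11 / nok-15.2.4 / ok-15.2.5 cases in config_test.go.
var (
	minNautilus = cephver.CephVersion{Major: 14, Minor: 2, Extra: 11}
	minOctopus  = cephver.CephVersion{Major: 15, Minor: 2, Extra: 5}
)

// rawModeSupported: a new-enough Nautilus qualifies, Octopus is excluded from
// that first rule and must instead meet its own, higher backport minimum.
func rawModeSupported(v cephver.CephVersion) bool {
	if v.IsAtLeast(minNautilus) && !v.IsOctopus() {
		return true
	}
	return v.IsAtLeast(minOctopus)
}

func main() {
	for _, v := range []cephver.CephVersion{
		{Major: 14, Minor: 2, Extra: 4},  // too old
		{Major: 14, Minor: 2, Extra: 11}, // supported on Nautilus
		{Major: 15, Minor: 2, Extra: 4},  // Octopus before its backport
		{Major: 15, Minor: 2, Extra: 5},  // supported on Octopus
	} {
		fmt.Printf("%s supported=%t\n", v.String(), rawModeSupported(v))
	}
}
```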
-type StoreConfig struct { - WalSizeMB int `json:"walSizeMB,omitempty"` - DatabaseSizeMB int `json:"databaseSizeMB,omitempty"` - OSDsPerDevice int `json:"osdsPerDevice,omitempty"` - EncryptedDevice bool `json:"encryptedDevice,omitempty"` - MetadataDevice string `json:"metadataDevice,omitempty"` - DeviceClass string `json:"deviceClass,omitempty"` - InitialWeight string `json:"initialWeight,omitempty"` - PrimaryAffinity string `json:"primaryAffinity,omitempty"` -} - -// NewStoreConfig returns a StoreConfig with proper defaults set. -func NewStoreConfig() StoreConfig { - return StoreConfig{ - OSDsPerDevice: 1, - } -} - -// ToStoreConfig converts a config string-string map to a StoreConfig. -func ToStoreConfig(config map[string]string) StoreConfig { - storeConfig := NewStoreConfig() - for k, v := range config { - switch k { - case WalSizeMBKey: - storeConfig.WalSizeMB = convertToIntIgnoreErr(v) - case DatabaseSizeMBKey: - storeConfig.DatabaseSizeMB = convertToIntIgnoreErr(v) - case OSDsPerDeviceKey: - i := convertToIntIgnoreErr(v) - if i > 0 { // only allow values 1 or more to be set - storeConfig.OSDsPerDevice = i - } - case EncryptedDeviceKey: - storeConfig.EncryptedDevice = (v == "true") - case MetadataDeviceKey: - storeConfig.MetadataDevice = v - case DeviceClassKey: - storeConfig.DeviceClass = v - case InitialWeightKey: - storeConfig.InitialWeight = v - case PrimaryAffinityKey: - storeConfig.PrimaryAffinity = v - } - } - - return storeConfig -} - -func MetadataDevice(config map[string]string) string { - for k, v := range config { - switch k { - case MetadataDeviceKey: - return v - } - } - - return "" -} - -func convertToIntIgnoreErr(raw string) int { - val, err := strconv.Atoi(raw) - if err != nil { - val = 0 - } - - return val -} - -// ConfiguredDevice is a device with a corresponding configuration. -type ConfiguredDevice struct { - ID string `json:"id"` - StoreConfig StoreConfig `json:"storeConfig"` -} diff --git a/pkg/operator/ceph/cluster/osd/config/scheme.go b/pkg/operator/ceph/cluster/osd/config/scheme.go deleted file mode 100644 index 37478de76..000000000 --- a/pkg/operator/ceph/cluster/osd/config/scheme.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -const ( - // Bluestore represents a bluestore OSD - Bluestore = "bluestore" - // WalDefaultSizeMB is the default WAL size in Megabytes for Rocksdb in Bluestore - WalDefaultSizeMB = 576 -) diff --git a/pkg/operator/ceph/cluster/osd/config_test.go b/pkg/operator/ceph/cluster/osd/config_test.go deleted file mode 100644 index 5753d76f5..000000000 --- a/pkg/operator/ceph/cluster/osd/config_test.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/stretchr/testify/assert" -) - -func TestOsdOnSDNFlag(t *testing.T) { - network := cephv1.NetworkSpec{} - - args := osdOnSDNFlag(network) - assert.NotEmpty(t, args) - - network.Provider = "host" - args = osdOnSDNFlag(network) - assert.Empty(t, args) -} - -func TestEncryptionKeyPath(t *testing.T) { - assert.Equal(t, "/etc/ceph/luks_key", encryptionKeyPath()) -} - -func TestEncryptionBlockDestinationCopy(t *testing.T) { - m := "/var/lib/ceph/osd/ceph-0" - assert.Equal(t, "/var/lib/ceph/osd/ceph-0/block-tmp", encryptionBlockDestinationCopy(m, bluestoreBlockName)) - assert.Equal(t, "/var/lib/ceph/osd/ceph-0/block.db-tmp", encryptionBlockDestinationCopy(m, bluestoreMetadataName)) - assert.Equal(t, "/var/lib/ceph/osd/ceph-0/block.wal-tmp", encryptionBlockDestinationCopy(m, bluestoreWalName)) -} - -func TestEncryptionDMPath(t *testing.T) { - assert.Equal(t, "/dev/mapper/set1-data-0-6rqdn-block-dmcrypt", encryptionDMPath("set1-data-0-6rqdn", DmcryptBlockType)) -} - -func TestEncryptionDMName(t *testing.T) { - assert.Equal(t, "set1-data-0-6rqdn-block-dmcrypt", encryptionDMName("set1-data-0-6rqdn", DmcryptBlockType)) -} - -func TestClusterIsCephVolumeRAwModeSupported(t *testing.T) { - type fields struct { - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - rookVersion string - spec cephv1.ClusterSpec - ValidStorage cephv1.StorageScopeSpec - kv *k8sutil.ConfigMapKVStore - } - tests := []struct { - name string - fields fields - want bool - }{ - {"nok-14.2.4", fields{&clusterd.Context{}, &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 4}}, "", cephv1.ClusterSpec{}, cephv1.StorageScopeSpec{}, &k8sutil.ConfigMapKVStore{}}, false}, - {"ok-14.2.11", fields{&clusterd.Context{}, &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 14, Minor: 2, Extra: 11}}, "", cephv1.ClusterSpec{}, cephv1.StorageScopeSpec{}, &k8sutil.ConfigMapKVStore{}}, true}, - {"nok-15.2.4", fields{&clusterd.Context{}, &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 4}}, "", cephv1.ClusterSpec{}, cephv1.StorageScopeSpec{}, &k8sutil.ConfigMapKVStore{}}, false}, - {"ok-15.2.5", fields{&clusterd.Context{}, &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 5}}, "", cephv1.ClusterSpec{}, cephv1.StorageScopeSpec{}, &k8sutil.ConfigMapKVStore{}}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Cluster{ - context: tt.fields.context, - clusterInfo: tt.fields.clusterInfo, - rookVersion: tt.fields.rookVersion, - spec: tt.fields.spec, - ValidStorage: tt.fields.ValidStorage, - kv: tt.fields.kv, - } - if got := c.isCephVolumeRawModeSupported(); got != tt.want { - t.Errorf("Cluster.isCephVolumeRAwModeSupported() = %v, want %v", got, tt.want) - } - }) - } -} diff --git 
a/pkg/operator/ceph/cluster/osd/create.go b/pkg/operator/ceph/cluster/osd/create.go deleted file mode 100644 index eaba45506..000000000 --- a/pkg/operator/ceph/cluster/osd/create.go +++ /dev/null @@ -1,410 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "fmt" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - kms "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - osdconfig "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/version" -) - -type createConfig struct { - cluster *Cluster - provisionConfig *provisionConfig - awaitingStatusConfigMaps sets.String // These status configmaps were created for OSD prepare jobs - finishedStatusConfigMaps sets.String // Status configmaps are added here as provisioning is completed for them - deployments *existenceList // these OSDs have existing deployments -} - -// allow overriding these functions for unit tests -var ( - createDaemonOnNodeFunc = createDaemonOnNode - createDaemonOnPVCFunc = createDaemonOnPVC - - updateConditionFunc = opcontroller.UpdateCondition -) - -func (c *Cluster) newCreateConfig( - provisionConfig *provisionConfig, - awaitingStatusConfigMaps sets.String, - deployments *existenceList, -) *createConfig { - if awaitingStatusConfigMaps == nil { - awaitingStatusConfigMaps = sets.NewString() - } - return &createConfig{ - c, - provisionConfig, - awaitingStatusConfigMaps, - sets.NewString(), - deployments, - } -} - -func (c *createConfig) progress() (completed, initial int) { - return c.finishedStatusConfigMaps.Len(), c.awaitingStatusConfigMaps.Len() -} - -func (c *createConfig) doneCreating() bool { - return c.awaitingStatusConfigMaps.Len() == c.finishedStatusConfigMaps.Len() -} - -func (c *createConfig) createNewOSDsFromStatus( - status *OrchestrationStatus, - nodeOrPVCName string, - errs *provisionErrors, -) { - if !c.awaitingStatusConfigMaps.Has(statusConfigMapName(nodeOrPVCName)) { - // If there is a dangling OSD prepare configmap from another reconcile, don't process it - logger.Infof("not creating deployments for OSD prepare results found in ConfigMap %q which was not created for the latest storage spec", statusConfigMapName(nodeOrPVCName)) - return - } - - if c.finishedStatusConfigMaps.Has(statusConfigMapName(nodeOrPVCName)) { - // If we have already processed this configmap, don't process it again - logger.Infof("not creating deployments for OSD prepare results found in ConfigMap %q which was already processed", statusConfigMapName(nodeOrPVCName)) - return - } - - for _, osd := range status.OSDs { - if c.deployments.Exists(osd.ID) { - // This OSD will be handled by the updater - logger.Debugf("not creating deployment for OSD %d which already exists", osd.ID) - 
continue - } - if status.PvcBackedOSD { - logger.Infof("creating OSD %d on PVC %q", osd.ID, nodeOrPVCName) - err := createDaemonOnPVCFunc(c.cluster, osd, nodeOrPVCName, c.provisionConfig) - if err != nil { - errs.addError("%v", errors.Wrapf(err, "failed to create OSD %d on PVC %q", osd.ID, nodeOrPVCName)) - } - } else { - logger.Infof("creating OSD %d on node %q", osd.ID, nodeOrPVCName) - err := createDaemonOnNodeFunc(c.cluster, osd, nodeOrPVCName, c.provisionConfig) - if err != nil { - errs.addError("%v", errors.Wrapf(err, "failed to create OSD %d on node %q", osd.ID, nodeOrPVCName)) - } - } - } - - c.doneWithStatus(nodeOrPVCName) -} - -// Call this if createNewOSDsFromStatus() isn't going to be called (like for a failed status) -func (c *createConfig) doneWithStatus(nodeOrPVCName string) { - c.finishedStatusConfigMaps.Insert(statusConfigMapName(nodeOrPVCName)) -} - -// Returns a set of all the awaitingStatusConfigMaps that will be updated by provisioning jobs. -// Returns error only if the calling function should halt all OSD provisioning. Non-halting errors -// are added to provisionErrors. -// -// Creation of prepare jobs is most directly related to creating new OSDs. And we want to keep all -// usage of awaitingStatusConfigMaps in this file. -func (c *Cluster) startProvisioningOverPVCs(config *provisionConfig, errs *provisionErrors) (sets.String, error) { - // Parsing storageClassDeviceSets and parsing it to volume sources - c.prepareStorageClassDeviceSets(errs) - - // no valid VolumeSource is ready to run an osd - if len(c.deviceSets) == 0 { - logger.Info("no storageClassDeviceSets defined to configure OSDs on PVCs") - return sets.NewString(), nil - } - - // Check k8s version - k8sVersion, err := k8sutil.GetK8SVersion(c.context.Clientset) - if err != nil { - errs.addError("failed to provision OSDs on PVCs. user has specified storageClassDeviceSets, but the Kubernetes version could not be determined. minimum Kubernetes version required: 1.13.0. %v", err) - return sets.NewString(), nil - } - if !k8sVersion.AtLeast(version.MustParseSemantic("v1.13.0")) { - errs.addError("failed to provision OSDs on PVCs. user has specified storageClassDeviceSets, but the Kubernetes version is not supported. user must update Kubernetes version. minimum Kubernetes version required: 1.13.0. version detected: %s", k8sVersion.String()) - return sets.NewString(), nil - } - - existingDeployments, err := c.getExistingOSDDeploymentsOnPVCs() - if err != nil { - errs.addError("failed to provision OSDs on PVCs. failed to query existing OSD deployments on PVCs. %v", err) - return sets.NewString(), nil - } - - awaitingStatusConfigMaps := sets.NewString() - for _, volume := range c.deviceSets { - // Check whether we need to cancel the orchestration - if err := opcontroller.CheckForCancelledOrchestration(c.context); err != nil { - return awaitingStatusConfigMaps, err - } - - dataSource, dataOK := volume.PVCSources[bluestorePVCData] - - // The data PVC template is required. - if !dataOK { - errs.addError("failed to create OSD provisioner for storageClassDeviceSet %q. 
missing the data template", volume.Name) - continue - } - - metadataSource, metadataOK := volume.PVCSources[bluestorePVCMetadata] - if metadataOK { - logger.Infof("OSD will have its main bluestore block on %q and its metadata device on %q", dataSource.ClaimName, metadataSource.ClaimName) - } else { - logger.Infof("OSD will have its main bluestore block on %q", dataSource.ClaimName) - } - - walSource, walOK := volume.PVCSources[bluestorePVCWal] - if walOK { - logger.Infof("OSD will have its wal device on %q", walSource.ClaimName) - } - - osdProps := osdProperties{ - crushHostname: dataSource.ClaimName, - pvc: dataSource, - metadataPVC: metadataSource, - walPVC: walSource, - resources: volume.Resources, - placement: volume.Placement, - preparePlacement: volume.PreparePlacement, - portable: volume.Portable, - schedulerName: volume.SchedulerName, - encrypted: volume.Encrypted, - deviceSetName: volume.Name, - } - osdProps.storeConfig.DeviceClass = volume.CrushDeviceClass - - if osdProps.encrypted { - // If the deviceSet template has "encrypted" but the Ceph version is not compatible - if !c.isCephVolumeRawModeSupported() { - errMsg := fmt.Sprintf("failed to validate storageClassDeviceSet %q. min required ceph version to support encryption is %q or %q", volume.Name, cephVolumeRawEncryptionModeMinNautilusCephVersion.String(), cephVolumeRawEncryptionModeMinOctopusCephVersion.String()) - errs.addError(errMsg) - continue - } - - // create encryption Kubernetes Secret if the PVC is encrypted - key, err := generateDmCryptKey() - if err != nil { - errMsg := fmt.Sprintf("failed to generate dmcrypt key for osd claim %q. %v", osdProps.pvc.ClaimName, err) - errs.addError(errMsg) - continue - } - - // Initialize the KMS code - kmsConfig := kms.NewConfig(c.context, &c.spec, c.clusterInfo) - - // We could set an env var in the Operator or a global var instead of the API call? - // Hopefully, the API is cheap and we can always retrieve the token if it has changed... - if c.spec.Security.KeyManagementService.IsTokenAuthEnabled() { - err := kms.SetTokenToEnvVar(c.context, c.spec.Security.KeyManagementService.TokenSecretName, kmsConfig.Provider, c.clusterInfo.Namespace) - if err != nil { - errMsg := fmt.Sprintf("failed to fetch kms token secret %q. %v", c.spec.Security.KeyManagementService.TokenSecretName, err) - errs.addError(errMsg) - continue - } - } - - // Generate and store the encrypted key in whatever KMS is configured - err = kmsConfig.PutSecret(osdProps.pvc.ClaimName, key) - if err != nil { - errMsg := fmt.Sprintf("failed to store secret. %v", err) - errs.addError(errMsg) - continue - } - } - - // Skip OSD prepare if deployment already exists for the PVC - if existingDeployments.Has(dataSource.ClaimName) { - logger.Debugf("skipping OSD prepare job creation for PVC %q because OSD daemon using the PVC already exists", osdProps.crushHostname) - continue - } - - // Update the orchestration status of this pvc to the starting state - status := OrchestrationStatus{Status: OrchestrationStatusStarting, PvcBackedOSD: true} - cmName := c.updateOSDStatus(osdProps.crushHostname, status) - - if err := c.runPrepareJob(&osdProps, config); err != nil { - c.handleOrchestrationFailure(errs, osdProps.crushHostname, "%v", err) - c.deleteStatusConfigMap(osdProps.crushHostname) - continue // do not record the status CM's name - } - - // record the name of the status configmap that will eventually receive results from the - // OSD provisioning job we just created. 
This will help us determine when we are done - // processing the results of provisioning jobs. - awaitingStatusConfigMaps.Insert(cmName) - } - - return awaitingStatusConfigMaps, nil -} - -// Returns a set of all the awaitingStatusConfigMaps that will be updated by provisioning jobs. -// Returns error only if the calling function should halt all OSD provisioning. Non-halting errors -// are added to provisionErrors. -// -// Creation of prepare jobs is most directly related to creating new OSDs. And we want to keep all -// usage of awaitingStatusConfigMaps in this file. -func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *provisionErrors) (sets.String, error) { - if !c.spec.Storage.UseAllNodes && len(c.spec.Storage.Nodes) == 0 { - logger.Info("no nodes are defined for configuring OSDs on raw devices") - return sets.NewString(), nil - } - - if c.spec.Storage.UseAllNodes { - if len(c.spec.Storage.Nodes) > 0 { - logger.Warningf("useAllNodes is TRUE, but nodes are specified. NODES in the cluster CR will be IGNORED unless useAllNodes is FALSE.") - } - - // Get the list of all nodes in the cluster. The placement settings will be applied below. - hostnameMap, err := k8sutil.GetNodeHostNames(c.context.Clientset) - if err != nil { - errs.addError("failed to provision OSDs on nodes. failed to get node hostnames. %v", err) - return sets.NewString(), nil - } - c.spec.Storage.Nodes = nil - for _, hostname := range hostnameMap { - storageNode := cephv1.Node{ - Name: hostname, - } - c.spec.Storage.Nodes = append(c.spec.Storage.Nodes, storageNode) - } - logger.Debugf("storage nodes: %+v", c.spec.Storage.Nodes) - } - // generally speaking, this finds nodes which are capable of running new osds - validNodes := k8sutil.GetValidNodes(c.spec.Storage, c.context.Clientset, cephv1.GetOSDPlacement(c.spec.Placement)) - - logger.Infof("%d of the %d storage nodes are valid", len(validNodes), len(c.spec.Storage.Nodes)) - - c.ValidStorage = *c.spec.Storage.DeepCopy() - c.ValidStorage.Nodes = validNodes - - // no valid node is ready to run an osd - if len(validNodes) == 0 { - logger.Warningf("no valid nodes available to run osds on nodes in namespace %q", c.clusterInfo.Namespace) - return sets.NewString(), nil - } - - if len(c.spec.DataDirHostPath) == 0 { - errs.addError("failed to provision OSDs on nodes. user has specified valid nodes for storage, but dataDirHostPath is empty. user must set CephCluster dataDirHostPath") - return sets.NewString(), nil - } - - awaitingStatusConfigMaps := sets.NewString() - for _, node := range c.ValidStorage.Nodes { - // Check whether we need to cancel the orchestration - if err := opcontroller.CheckForCancelledOrchestration(c.context); err != nil { - return awaitingStatusConfigMaps, err - } - - // fully resolve the storage config and resources for this node - // don't care about osd device class resources since it will be overwritten later for prepareosd resources - n := c.resolveNode(node.Name, "") - if n == nil { - logger.Warningf("node %q did not resolve", node.Name) - continue - } - - if n.Name == "" { - logger.Warningf("skipping node with a blank name! 
%+v", n) - continue - } - - // create the job that prepares osds on the node - storeConfig := osdconfig.ToStoreConfig(n.Config) - metadataDevice := osdconfig.MetadataDevice(n.Config) - osdProps := osdProperties{ - crushHostname: n.Name, - devices: n.Devices, - selection: n.Selection, - resources: n.Resources, - storeConfig: storeConfig, - metadataDevice: metadataDevice, - } - - // update the orchestration status of this node to the starting state - status := OrchestrationStatus{Status: OrchestrationStatusStarting} - cmName := c.updateOSDStatus(n.Name, status) - - if err := c.runPrepareJob(&osdProps, config); err != nil { - c.handleOrchestrationFailure(errs, n.Name, "%v", err) - c.deleteStatusConfigMap(n.Name) - continue // do not record the status CM's name - } - - // record the name of the status configmap that will eventually receive results from the - // OSD provisioning job we just created. This will help us determine when we are done - // processing the results of provisioning jobs. - awaitingStatusConfigMaps.Insert(cmName) - } - - return awaitingStatusConfigMaps, nil -} - -func (c *Cluster) runPrepareJob(osdProps *osdProperties, config *provisionConfig) error { - nodeOrPVC := "node" - if osdProps.onPVC() { - nodeOrPVC = "PVC" - } - nodeOrPVCName := osdProps.crushHostname - - job, err := c.makeJob(*osdProps, config) - if err != nil { - return errors.Wrapf(err, "failed to generate osd provisioning job template for %s %q", nodeOrPVC, nodeOrPVCName) - } - - if err := k8sutil.RunReplaceableJob(c.context.Clientset, job, false); err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to run provisioning job for %s %q", nodeOrPVC, nodeOrPVCName) - } - logger.Infof("letting preexisting OSD provisioning job run to completion for %s %q", nodeOrPVC, nodeOrPVCName) - return nil - } - - logger.Infof("started OSD provisioning job for %s %q", nodeOrPVC, nodeOrPVCName) - return nil -} - -func createDaemonOnPVC(c *Cluster, osd OSDInfo, pvcName string, config *provisionConfig) error { - d, err := deploymentOnPVC(c, osd, pvcName, config) - if err != nil { - return err - } - - message := fmt.Sprintf("Processing OSD %d on PVC %q", osd.ID, pvcName) - updateConditionFunc(c.context, c.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message) - - _, err = k8sutil.CreateDeployment(c.context.Clientset, d) - return errors.Wrapf(err, "failed to create deployment for OSD %d on PVC %q", osd.ID, pvcName) -} - -func createDaemonOnNode(c *Cluster, osd OSDInfo, nodeName string, config *provisionConfig) error { - d, err := deploymentOnNode(c, osd, nodeName, config) - if err != nil { - return err - } - - message := fmt.Sprintf("Processing OSD %d on node %q", osd.ID, nodeName) - updateConditionFunc(c.context, c.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message) - - _, err = k8sutil.CreateDeployment(c.context.Clientset, d) - return errors.Wrapf(err, "failed to create deployment for OSD %d on node %q", osd.ID, nodeName) -} diff --git a/pkg/operator/ceph/cluster/osd/create_test.go b/pkg/operator/ceph/cluster/osd/create_test.go deleted file mode 100644 index 5272da8ea..000000000 --- a/pkg/operator/ceph/cluster/osd/create_test.go +++ /dev/null @@ -1,704 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "context" - "strings" - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - "github.com/tevino/abool" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - apiresource "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/kubernetes/fake" - k8stesting "k8s.io/client-go/testing" -) - -func Test_createNewOSDsFromStatus(t *testing.T) { - namespace := "my-namespace" - - clientset := fake.NewSimpleClientset() - // clusterd.Context is created in doSetup() - - // names of status configmaps for nodes - statusNameNode0 := statusConfigMapName("node0") - statusNameNode2 := statusConfigMapName("node2") - statusNamePVC1 := statusConfigMapName("pvc1") - statusNamePVC2 := statusConfigMapName("pvc2") - - clusterInfo := &cephclient.ClusterInfo{ - Namespace: namespace, - } - clusterInfo.SetName("mycluster") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - - var oldCreateDaemonOnNodeFunc = createDaemonOnNodeFunc - defer func() { - createDaemonOnNodeFunc = oldCreateDaemonOnNodeFunc - }() - createCallsOnNode := []int{} - induceFailureCreatingOSD := -1 // allow causing the create call to fail for a given OSD ID - createDaemonOnNodeFunc = func(c *Cluster, osd OSDInfo, nodeName string, config *provisionConfig) error { - createCallsOnNode = append(createCallsOnNode, osd.ID) - if induceFailureCreatingOSD == osd.ID { - return errors.Errorf("createOSDDaemonOnNode: induced failure on OSD %d", osd.ID) - } - return nil - } - - var oldCreateDaemonOnPVCFunc = createDaemonOnPVCFunc - defer func() { - createDaemonOnPVCFunc = oldCreateDaemonOnPVCFunc - }() - createCallsOnPVC := []int{} - // reuse induceFailureCreatingOSD from above - createDaemonOnPVCFunc = func(c *Cluster, osd OSDInfo, pvcName string, config *provisionConfig) error { - createCallsOnPVC = append(createCallsOnPVC, osd.ID) - if induceFailureCreatingOSD == osd.ID { - return errors.Errorf("createOSDDaemonOnNode: induced failure on OSD %d", osd.ID) - } - return nil - } - - // Simulate an environment where deployments exist for OSDs 3, 4, and 6 - deployments := newExistenceListWithCapacity(5) - deployments.Add(3) - deployments.Add(4) - deployments.Add(6) - - spec := cephv1.ClusterSpec{} - var status *OrchestrationStatus - awaitingStatusConfigMaps := sets.NewString() - - var c *Cluster - var createConfig *createConfig - var errs *provisionErrors - doSetup := func() { - // none of this code should ever add or remove deployments from the existence list - assert.Equal(t, 3, deployments.Len()) - // Simulate environment where provision jobs were created for node0, node2, pvc1, and pvc2 - awaitingStatusConfigMaps = sets.NewString() - awaitingStatusConfigMaps.Insert( - statusNameNode0, statusNameNode2, - 
statusNamePVC1, statusNamePVC2) - createCallsOnNode = createCallsOnNode[:0] - createCallsOnPVC = createCallsOnPVC[:0] - errs = newProvisionErrors() - ctx := &clusterd.Context{ - Clientset: clientset, - } - c = New(ctx, clusterInfo, spec, "rook/rook:master") - config := c.newProvisionConfig() - createConfig = c.newCreateConfig(config, awaitingStatusConfigMaps, deployments) - } - - t.Run("node: create no OSDs when none are returned from node", func(t *testing.T) { - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{}, - PvcBackedOSD: false, - } - createConfig.createNewOSDsFromStatus(status, "node0", errs) - assert.Zero(t, errs.len()) - assert.Len(t, createCallsOnNode, 0) - assert.Len(t, createCallsOnPVC, 0) - // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) - assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNameNode0)) - }) - - t.Run("test: node: create all OSDs on node when all do not exist", func(t *testing.T) { - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - {ID: 0}, {ID: 1}, {ID: 2}, - }, - PvcBackedOSD: false, - } - createConfig.createNewOSDsFromStatus(status, "node2", errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, createCallsOnNode, []int{0, 1, 2}) - assert.Len(t, createCallsOnPVC, 0) - // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) - assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNameNode2)) - }) - - t.Run("node: create only nonexistent OSDs on node when some already exist", func(t *testing.T) { - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - {ID: 3}, {ID: 4}, // already exist - {ID: 5}, // does not exist - {ID: 6}, // already exists - {ID: 7}, // does not exist - }, - PvcBackedOSD: false, - } - createConfig.createNewOSDsFromStatus(status, "node0", errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, createCallsOnNode, []int{5, 7}) - assert.Len(t, createCallsOnPVC, 0) - // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) - assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNameNode0)) - }) - - t.Run("node: skip creating OSDs for status configmaps that weren't created for this reconcile", func(t *testing.T) { - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - {ID: 0}, {ID: 1}, {ID: 2}, - }, - PvcBackedOSD: false, - } - createConfig.createNewOSDsFromStatus(status, "node1", errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, createCallsOnNode, []int{}) - assert.Len(t, createCallsOnPVC, 0) - // status map should NOT have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 0, createConfig.finishedStatusConfigMaps.Len()) - }) - - t.Run("node: errors reported if OSDs fail to create", func(t *testing.T) { - induceFailureCreatingOSD = 1 // fail when creating OSD 1 - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - {ID: 0}, {ID: 1}, {ID: 2}, - }, - PvcBackedOSD: false, - } - createConfig.createNewOSDsFromStatus(status, "node0", errs) - assert.Equal(t, 1, errs.len()) - assert.ElementsMatch(t, createCallsOnNode, []int{0, 1, 2}) - assert.Len(t, createCallsOnPVC, 0) - // status map should have been marked completed - assert.Equal(t, 4, 
awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) - assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNameNode0)) - induceFailureCreatingOSD = -1 // off - }) - - t.Run("pvc: create no OSDs when none are returned from PVC", func(t *testing.T) { - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{}, - PvcBackedOSD: true, - } - createConfig.createNewOSDsFromStatus(status, "pvc1", errs) - assert.Zero(t, errs.len()) - assert.Len(t, createCallsOnNode, 0) - assert.Len(t, createCallsOnPVC, 0) - // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) - assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNamePVC1)) - }) - - t.Run("pvc: create all OSDs on pvc when all do not exist", func(t *testing.T) { - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - {ID: 0}, {ID: 1}, {ID: 2}, - }, - PvcBackedOSD: true, - } - createConfig.createNewOSDsFromStatus(status, "pvc2", errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, createCallsOnPVC, []int{0, 1, 2}) - assert.Len(t, createCallsOnNode, 0) - // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) - assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNamePVC2)) - }) - - t.Run("pvc: create only nonexistent OSDs on pvc when some already exist", func(t *testing.T) { - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - {ID: 3}, {ID: 4}, // already exist - {ID: 5}, // does not exist - {ID: 6}, // already exists - {ID: 7}, // does not exist - }, - PvcBackedOSD: true, - } - createConfig.createNewOSDsFromStatus(status, "pvc1", errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, createCallsOnPVC, []int{5, 7}) - assert.Len(t, createCallsOnNode, 0) - // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) - assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNamePVC1)) - }) - - t.Run("pvc: skip creating OSDs for status configmaps that weren't created for this reconcile", func(t *testing.T) { - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - {ID: 0}, {ID: 1}, {ID: 2}, - }, - PvcBackedOSD: true, - } - createConfig.createNewOSDsFromStatus(status, "pvc0", errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, createCallsOnPVC, []int{}) - assert.Len(t, createCallsOnNode, 0) - // no status maps should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 0, createConfig.finishedStatusConfigMaps.Len()) - }) - - t.Run("pvc: errors reported if OSDs fail to create", func(t *testing.T) { - induceFailureCreatingOSD = 1 // fail when creating OSD 1 - doSetup() - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - {ID: 0}, {ID: 1}, {ID: 2}, - }, - PvcBackedOSD: true, - } - createConfig.createNewOSDsFromStatus(status, "pvc1", errs) - assert.Equal(t, 1, errs.len()) - assert.ElementsMatch(t, createCallsOnPVC, []int{0, 1, 2}) - assert.Len(t, createCallsOnNode, 0) - // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) - assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNamePVC1)) - induceFailureCreatingOSD = -1 // off - 
}) -} - -func Test_startProvisioningOverPVCs(t *testing.T) { - namespace := "rook-ceph" - - clientset := test.NewComplexClientset(t) // fake clientset with generate name functionality - - requestCancelOrchestration := *abool.New() - // clusterd.Context is created in doSetup() - - clusterInfo := &cephclient.ClusterInfo{ - Namespace: namespace, - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("mycluster") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - - spec := cephv1.ClusterSpec{} - fakeK8sVersion := "v1.13.0" - - var errs *provisionErrors - var c *Cluster - var config *provisionConfig - var awaitingStatusConfigMaps sets.String - var err error - doSetup := func() { - test.SetFakeKubernetesVersion(clientset, fakeK8sVersion) // PVCs require k8s version v1.13+ - errs = newProvisionErrors() - ctx := &clusterd.Context{ - Clientset: clientset, - RequestCancelOrchestration: &requestCancelOrchestration, - } - c = New(ctx, clusterInfo, spec, "rook/rook:master") - config = c.newProvisionConfig() - } - - t.Run("do nothing if no storage spec is given", func(t *testing.T) { - spec = cephv1.ClusterSpec{} - doSetup() - awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) - assert.NoError(t, err) - assert.Zero(t, awaitingStatusConfigMaps.Len()) - assert.Zero(t, errs.len()) - // no result configmaps should have been created - cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 0) - }) - - t.Run("do nothing if device set count is zero", func(t *testing.T) { - spec = cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{ - StorageClassDeviceSets: []cephv1.StorageClassDeviceSet{ - { - Name: "set1", - Count: 0, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ - newDummyPVC("data", namespace, "10Gi", "gp2"), - }, - }, - }, - }, - } - doSetup() - awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) - assert.NoError(t, err) - assert.Zero(t, awaitingStatusConfigMaps.Len()) - assert.Zero(t, errs.len()) // this was not a problem with a single job but with ALL jobs - // no result configmaps should have been created - cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 0) - }) - - t.Run("one device set with 2 PVCs", func(t *testing.T) { - spec = cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{ - StorageClassDeviceSets: []cephv1.StorageClassDeviceSet{ - { - Name: "set1", - Count: 2, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ - newDummyPVC("data", namespace, "10Gi", "gp2"), - }, - }, - }, - }, - } - doSetup() - awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) - assert.NoError(t, err) - assert.Equal(t, 2, awaitingStatusConfigMaps.Len()) - assert.Zero(t, errs.len()) - cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 2) - }) - - t.Run("repeat same device set with 2 PVCs (before provisioning jobs are done and before OSD deployments are created)", func(t *testing.T) { - // spec = - doSetup() - awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) - assert.NoError(t, err) - assert.Equal(t, 2, awaitingStatusConfigMaps.Len()) - assert.Zero(t, errs.len()) - cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 2) 
// still just 2 configmaps should exist (the same 2 from before) - }) - - t.Run("error if k8s version not high enough", func(t *testing.T) { - // spec = - clientset = test.NewComplexClientset(t) // reset to empty fake k8s environment - fakeK8sVersion = "v1.12.7" - doSetup() - awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) - assert.NoError(t, err) - assert.Equal(t, 0, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, errs.len()) - cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 0) - fakeK8sVersion = "v1.13.0" - }) - - t.Run("request cancel orchestration", func(t *testing.T) { - // spec = - clientset = test.NewComplexClientset(t) // reset to empty fake k8s environment - requestCancelOrchestration.Set() - doSetup() - awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) - assert.Error(t, err) - assert.Zero(t, errs.len()) - assert.Zero(t, awaitingStatusConfigMaps.Len()) - requestCancelOrchestration.UnSet() - }) - - t.Run("error if no volume claim template", func(t *testing.T) { - spec = cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{ - StorageClassDeviceSets: []cephv1.StorageClassDeviceSet{ - { - Name: "set1", - Count: 2, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{}, - }, - }, - }, - } - clientset = test.NewComplexClientset(t) // reset to empty fake k8s environment - doSetup() - awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) - assert.NoError(t, err) - assert.Equal(t, 0, awaitingStatusConfigMaps.Len()) - assert.Equal(t, 1, errs.len()) - cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 0) - }) - - // TODO: should we verify the osdProps set on the job? 
-} - -func Test_startProvisioningOverNodes(t *testing.T) { - namespace := "rook-ceph" - dataDirHostPath := "/var/lib/mycluster" - - clientset := test.New(t, 3) // fake clientset with 3 nodes - requestCancelOrchestration := *abool.New() - // clusterd.Context is created in doSetup() - - // names of status configmaps for nodes - statusNameNode0 := statusConfigMapName("node0") - statusNameNode1 := statusConfigMapName("node1") - statusNameNode2 := statusConfigMapName("node2") - // statusNameNode3 := statusConfigMapName("node3") - - clusterInfo := &cephclient.ClusterInfo{ - Namespace: namespace, - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("mycluster") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - - var useAllDevices bool - spec := cephv1.ClusterSpec{} - - var errs *provisionErrors - var c *Cluster - var config *provisionConfig - var prepareJobsRun sets.String - var err error - var cms *corev1.ConfigMapList - doSetup := func() { - errs = newProvisionErrors() - ctx := &clusterd.Context{ - Clientset: clientset, - RequestCancelOrchestration: &requestCancelOrchestration, - } - c = New(ctx, clusterInfo, spec, "rook/rook:master") - config = c.newProvisionConfig() - } - - t.Run("do nothing if no storage spec is given", func(t *testing.T) { - spec = cephv1.ClusterSpec{} - doSetup() - prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) - assert.NoError(t, err) - assert.Zero(t, prepareJobsRun.Len()) - assert.Zero(t, errs.len()) - // no result configmaps should have been created - cms, err = clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 0) - }) - - t.Run("error on empty dataDirHostPath", func(t *testing.T) { - useAllDevices = true - spec = cephv1.ClusterSpec{ - // this storage spec should cause prepare jobs to run on all (3) nodes - Storage: cephv1.StorageScopeSpec{ - UseAllNodes: true, - Selection: cephv1.Selection{ - UseAllDevices: &useAllDevices, - }, - }, - // BUT empty should not allow any jobs to be created - DataDirHostPath: "", - } - doSetup() - prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) - assert.NoError(t, err) - assert.Zero(t, prepareJobsRun.Len()) - assert.Equal(t, 1, errs.len()) // this was not a problem with a single job but with ALL jobs - // no result configmaps should have been created - cms, err = clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 0) - }) - - t.Run("use all nodes and devices", func(t *testing.T) { - // Setting dataDirHostPath non-empty on the previous config should have jobs run for all nodes - spec.DataDirHostPath = dataDirHostPath - doSetup() - prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) - assert.NoError(t, err) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, - []string{statusNameNode0, statusNameNode1, statusNameNode2}, - prepareJobsRun.List(), - ) - // all result configmaps should have been created - cms, err = clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 3) - }) - - t.Run("use all nodes and devices when useAllNodes and individual nodes are both set", func(t *testing.T) { - // this also tests that jobs that currently exist (created in previous test) are handled - spec.Storage.Nodes = []cephv1.Node{ - {Name: "node0"}, {Name: "node2"}, - } - doSetup() - prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) - 
assert.NoError(t, err) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, - []string{statusNameNode0, statusNameNode1, statusNameNode2}, - prepareJobsRun.List(), - ) - }) - - t.Run("use individual nodes", func(t *testing.T) { - // this also tests that existing status configmaps (from the previous tests) don't affect the - // reported status configmaps from this run - spec = cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{ - UseAllNodes: false, - Nodes: []cephv1.Node{ - // run on only node0 and node2 - {Name: "node0"}, - {Name: "node2"}, - }, - Selection: cephv1.Selection{ - UseAllDevices: &useAllDevices, - }, - }, - DataDirHostPath: dataDirHostPath, - } - doSetup() - prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) - assert.NoError(t, err) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, - []string{statusNameNode0, statusNameNode2}, - prepareJobsRun.List(), - ) - }) - - t.Run("request cancel orchestration", func(t *testing.T) { - requestCancelOrchestration.Set() - doSetup() - prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) - assert.Error(t, err) - assert.Zero(t, errs.len()) - assert.Zero(t, prepareJobsRun.Len()) - requestCancelOrchestration.UnSet() - }) - - t.Run("use no nodes", func(t *testing.T) { - spec = cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{ - UseAllNodes: false, - Nodes: []cephv1.Node{ - // empty - }, - Selection: cephv1.Selection{ - UseAllDevices: &useAllDevices, - }, - }, - DataDirHostPath: dataDirHostPath, - } - doSetup() - prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) - assert.NoError(t, err) - assert.Zero(t, errs.len()) - assert.Zero(t, prepareJobsRun.Len()) - }) - - t.Run("failures running prepare jobs", func(t *testing.T) { - spec = cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{ - UseAllNodes: false, - Nodes: []cephv1.Node{ - // run on only node0 and node2 - {Name: "node0"}, - {Name: "node2"}, - }, - Selection: cephv1.Selection{ - UseAllDevices: &useAllDevices, - }, - }, - DataDirHostPath: dataDirHostPath, - } - // re-initialize an empty test clientset with 3 nodes - clientset = test.New(t, 3) - // add a job reactor that will cause the node2 job to fail - var jobReactor k8stesting.ReactionFunc = func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { - switch action := action.(type) { - case k8stesting.CreateActionImpl: - obj := action.GetObject() - objMeta, err := meta.Accessor(obj) - if err != nil { - panic(err) - } - objName := objMeta.GetName() - if strings.Contains(objName, "node2") { - return true, nil, errors.Errorf("induced error") - } - default: - panic("this should not happen") - } - return false, nil, nil - } - clientset.PrependReactor("create", "jobs", jobReactor) - doSetup() - prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) - assert.NoError(t, err) - assert.Equal(t, 1, errs.len()) - assert.ElementsMatch(t, - []string{statusNameNode0}, - prepareJobsRun.List(), - ) - // with a fresh clientset, only the one results ConfigMap should exist - cms, err = clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cms.Items, 1) - assert.Equal(t, prepareJobsRun.List()[0], cms.Items[0].Name) - }) -} - -func newDummyPVC(name, namespace string, capacity string, storageClassName string) corev1.PersistentVolumeClaim { - volMode := corev1.PersistentVolumeBlock - return corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: 
corev1.PersistentVolumeClaimSpec{ - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceName(corev1.ResourceStorage): apiresource.MustParse(capacity), - }, - }, - StorageClassName: &storageClassName, - VolumeMode: &volMode, - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, - }, - }, - } -} diff --git a/pkg/operator/ceph/cluster/osd/deviceSet.go b/pkg/operator/ceph/cluster/osd/deviceSet.go deleted file mode 100644 index 3081ecf1a..000000000 --- a/pkg/operator/ceph/cluster/osd/deviceSet.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" -) - -// deviceSet is the processed version of the StorageClassDeviceSet -type deviceSet struct { - // Name is the name of the volume source - Name string - // PVCSources - PVCSources map[string]v1.PersistentVolumeClaimVolumeSource - // CrushDeviceClass represents the crush device class for an OSD - CrushDeviceClass string - // CrushInitialWeight represents initial OSD weight in TiB units - CrushInitialWeight string - // CrushPrimaryAffinity represents initial OSD primary-affinity within range [0, 1] - CrushPrimaryAffinity string - // Size represents the size requested for the PVC - Size string - // Resources requests/limits for the devices - Resources v1.ResourceRequirements - // Placement constraints for the device daemons - Placement cephv1.Placement - // Placement constraints for the device preparation - PreparePlacement *cephv1.Placement - // Provider-specific device configuration - Config map[string]string - // Portable represents OSD portability across the hosts - Portable bool - // TuneSlowDeviceClass Tune the OSD when running on a slow Device Class - TuneSlowDeviceClass bool - // TuneFastDeviceClass Tune the OSD when running on a fast Device Class - TuneFastDeviceClass bool - // Scheduler name for OSD pod placement - SchedulerName string - // Whether to encrypt the deviceSet - Encrypted bool -} - -func (c *Cluster) prepareStorageClassDeviceSets(errs *provisionErrors) { - c.deviceSets = []deviceSet{} - - existingPVCs, uniqueOSDsPerDeviceSet, err := GetExistingPVCs(c.context, c.clusterInfo.Namespace) - if err != nil { - errs.addError("failed to detect existing OSD PVCs. %v", err) - return - } - - // Iterate over deviceSet - for _, deviceSet := range c.spec.Storage.StorageClassDeviceSets { - if err := controller.CheckPodMemory(cephv1.ResourcesKeyPrepareOSD, deviceSet.Resources, cephOsdPodMinimumMemory); err != nil { - errs.addError("failed to provision OSDs on PVC for storageClassDeviceSet %q. 
%v", deviceSet.Name, err) - continue - } - // Check if the volume claim template is specified - if len(deviceSet.VolumeClaimTemplates) == 0 { - errs.addError("failed to provision OSDs on PVC for storageClassDeviceSet %q. no volumeClaimTemplate is specified. user must specify a volumeClaimTemplate", deviceSet.Name) - continue - } - - // Iterate through existing PVCs to ensure they are up-to-date, no metadata pvcs are missing, etc - highestExistingID := -1 - countInDeviceSet := 0 - if existingIDs, ok := uniqueOSDsPerDeviceSet[deviceSet.Name]; ok { - logger.Infof("verifying PVCs exist for %d OSDs in device set %q", existingIDs.Len(), deviceSet.Name) - for existingID := range existingIDs { - pvcID, err := strconv.Atoi(existingID) - if err != nil { - errs.addError("invalid PVC index %q found for device set %q", existingID, deviceSet.Name) - continue - } - // keep track of the max PVC index found so we know what index to start with for new OSDs - if pvcID > highestExistingID { - highestExistingID = pvcID - } - deviceSet := c.createDeviceSetPVCsForIndex(deviceSet, existingPVCs, pvcID, errs) - c.deviceSets = append(c.deviceSets, deviceSet) - } - countInDeviceSet = existingIDs.Len() - } - // Create new PVCs if we are not yet at the expected count - // No new PVCs will be created if we have too many - pvcsToCreate := deviceSet.Count - countInDeviceSet - if pvcsToCreate > 0 { - logger.Infof("creating %d new PVCs for device set %q", pvcsToCreate, deviceSet.Name) - } - for i := 0; i < pvcsToCreate; i++ { - pvcID := highestExistingID + i + 1 - deviceSet := c.createDeviceSetPVCsForIndex(deviceSet, existingPVCs, pvcID, errs) - c.deviceSets = append(c.deviceSets, deviceSet) - countInDeviceSet++ - } - } -} - -func (c *Cluster) createDeviceSetPVCsForIndex(newDeviceSet cephv1.StorageClassDeviceSet, existingPVCs map[string]*v1.PersistentVolumeClaim, setIndex int, errs *provisionErrors) deviceSet { - // Create the PVC source for each of the data, metadata, and other types of templates if defined. - pvcSources := map[string]v1.PersistentVolumeClaimVolumeSource{} - - var dataSize string - var crushDeviceClass string - var crushInitialWeight string - var crushPrimaryAffinity string - typesFound := sets.NewString() - for _, pvcTemplate := range newDeviceSet.VolumeClaimTemplates { - if pvcTemplate.Name == "" { - // For backward compatibility a blank name must be treated as a data volume - pvcTemplate.Name = bluestorePVCData - } - if typesFound.Has(pvcTemplate.Name) { - errs.addError("found duplicate volume claim template %q for device set %q", pvcTemplate.Name, newDeviceSet.Name) - continue - } - typesFound.Insert(pvcTemplate.Name) - - pvc, err := c.createDeviceSetPVC(existingPVCs, newDeviceSet.Name, pvcTemplate, setIndex) - if err != nil { - errs.addError("failed to provision PVC for device set %q index %d. %v", newDeviceSet.Name, setIndex, err) - continue - } - - // The PVC type must be from a predefined set such as "data", "metadata", and "wal". These names must be enforced if the wal/db are specified - // with a separate device, but if there is a single volume template we can assume it is always the data template. 
- pvcType := pvcTemplate.Name - if len(newDeviceSet.VolumeClaimTemplates) == 1 { - pvcType = bluestorePVCData - } - - if pvcType == bluestorePVCData { - pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] - dataSize = pvcSize.String() - crushDeviceClass = pvcTemplate.Annotations["crushDeviceClass"] - } - crushInitialWeight = pvcTemplate.Annotations["crushInitialWeight"] - crushPrimaryAffinity = pvcTemplate.Annotations["crushPrimaryAffinity"] - - pvcSources[pvcType] = v1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvc.GetName(), - ReadOnly: false, - } - } - - return deviceSet{ - Name: newDeviceSet.Name, - Resources: newDeviceSet.Resources, - Placement: newDeviceSet.Placement, - PreparePlacement: newDeviceSet.PreparePlacement, - Config: newDeviceSet.Config, - Size: dataSize, - PVCSources: pvcSources, - Portable: newDeviceSet.Portable, - TuneSlowDeviceClass: newDeviceSet.TuneSlowDeviceClass, - TuneFastDeviceClass: newDeviceSet.TuneFastDeviceClass, - SchedulerName: newDeviceSet.SchedulerName, - CrushDeviceClass: crushDeviceClass, - CrushInitialWeight: crushInitialWeight, - CrushPrimaryAffinity: crushPrimaryAffinity, - Encrypted: newDeviceSet.Encrypted, - } -} - -func (c *Cluster) createDeviceSetPVC(existingPVCs map[string]*v1.PersistentVolumeClaim, deviceSetName string, pvcTemplate v1.PersistentVolumeClaim, setIndex int) (*v1.PersistentVolumeClaim, error) { - ctx := context.TODO() - // old labels and PVC ID for backward compatibility - pvcID := legacyDeviceSetPVCID(deviceSetName, setIndex) - - // check for the existence of the pvc - existingPVC, ok := existingPVCs[pvcID] - if !ok { - // The old name of the PVC didn't exist, now try the new PVC name and label - pvcID = deviceSetPVCID(deviceSetName, pvcTemplate.GetName(), setIndex) - existingPVC = existingPVCs[pvcID] - } - pvc := makeDeviceSetPVC(deviceSetName, pvcID, setIndex, pvcTemplate, c.clusterInfo.Namespace) - err := c.clusterInfo.OwnerInfo.SetControllerReference(pvc) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to osd pvc %q", pvc.Name) - } - - if existingPVC != nil { - logger.Infof("OSD PVC %q already exists", existingPVC.Name) - - // Update the PVC in case the size changed - k8sutil.ExpandPVCIfRequired(c.context.Client, pvc, existingPVC) - return existingPVC, nil - } - - // No PVC found, creating a new one - deployedPVC, err := c.context.Clientset.CoreV1().PersistentVolumeClaims(c.clusterInfo.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "failed to create PVC %q for device set %q", pvc.Name, deviceSetName) - } - logger.Infof("successfully provisioned PVC %q", deployedPVC.Name) - - return deployedPVC, nil -} - -func makeDeviceSetPVC(deviceSetName, pvcID string, setIndex int, pvcTemplate v1.PersistentVolumeClaim, namespace string) *v1.PersistentVolumeClaim { - pvcLabels := makeStorageClassDeviceSetPVCLabel(deviceSetName, pvcID, setIndex) - - // Add user provided labels to pvcTemplates - for k, v := range pvcTemplate.GetLabels() { - pvcLabels[k] = v - } - - // pvc naming format rook-ceph-osd---- - return &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - // Use a generated name to avoid the possibility of two OSDs being created with the same ID. - // If one is removed and a new one is created later with the same ID, the OSD would fail to start. 
- GenerateName: pvcID, - Namespace: namespace, - Labels: pvcLabels, - Annotations: pvcTemplate.Annotations, - }, - Spec: pvcTemplate.Spec, - } -} - -// GetExistingPVCs fetches the list of OSD PVCs -func GetExistingPVCs(clusterdContext *clusterd.Context, namespace string) (map[string]*v1.PersistentVolumeClaim, map[string]sets.String, error) { - ctx := context.TODO() - selector := metav1.ListOptions{LabelSelector: CephDeviceSetPVCIDLabelKey} - pvcs, err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(ctx, selector) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to detect PVCs") - } - result := map[string]*v1.PersistentVolumeClaim{} - uniqueOSDsPerDeviceSet := map[string]sets.String{} - for i, pvc := range pvcs.Items { - // Populate the PVCs based on their unique name across all the device sets - pvcID := pvc.Labels[CephDeviceSetPVCIDLabelKey] - result[pvcID] = &pvcs.Items[i] - - // Create a map of the PVC IDs available in each device set based on PVC index - deviceSet := pvc.Labels[CephDeviceSetLabelKey] - pvcIndex := pvc.Labels[CephSetIndexLabelKey] - if _, ok := uniqueOSDsPerDeviceSet[deviceSet]; !ok { - uniqueOSDsPerDeviceSet[deviceSet] = sets.NewString() - } - uniqueOSDsPerDeviceSet[deviceSet].Insert(pvcIndex) - } - - return result, uniqueOSDsPerDeviceSet, nil -} - -func legacyDeviceSetPVCID(deviceSetName string, setIndex int) string { - return fmt.Sprintf("%s-%d", deviceSetName, setIndex) -} - -// This is the new function that generates the labels -// It includes the pvcTemplateName in it -func deviceSetPVCID(deviceSetName, pvcTemplateName string, setIndex int) string { - cleanName := strings.Replace(pvcTemplateName, " ", "-", -1) - return fmt.Sprintf("%s-%s-%d", deviceSetName, cleanName, setIndex) -} diff --git a/pkg/operator/ceph/cluster/osd/deviceset_test.go b/pkg/operator/ceph/cluster/osd/deviceset_test.go deleted file mode 100644 index 6bf7dd658..000000000 --- a/pkg/operator/ceph/cluster/osd/deviceset_test.go +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package osd - -import ( - "context" - "fmt" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - testexec "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes" - k8stesting "k8s.io/client-go/testing" -) - -func TestPrepareDeviceSets(t *testing.T) { - testPrepareDeviceSets(t, true) - testPrepareDeviceSets(t, false) -} - -func testPrepareDeviceSets(t *testing.T, setTemplateName bool) { - ctx := context.TODO() - clientset := testexec.New(t, 1) - context := &clusterd.Context{ - Clientset: clientset, - } - claim := testVolumeClaim("") - if setTemplateName { - claim.Name = "randomname" - } - deviceSet := cephv1.StorageClassDeviceSet{ - Name: "mydata", - Count: 1, - Portable: true, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{claim}, - SchedulerName: "custom-scheduler", - } - spec := cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{StorageClassDeviceSets: []cephv1.StorageClassDeviceSet{deviceSet}}, - } - cluster := &Cluster{ - context: context, - clusterInfo: client.AdminClusterInfo("testns"), - spec: spec, - } - - errs := newProvisionErrors() - cluster.prepareStorageClassDeviceSets(errs) - assert.Equal(t, 1, len(cluster.deviceSets)) - assert.Equal(t, 0, errs.len()) - assert.Equal(t, "mydata", cluster.deviceSets[0].Name) - assert.True(t, cluster.deviceSets[0].Portable) - _, dataOK := cluster.deviceSets[0].PVCSources["data"] - assert.True(t, dataOK) - assert.Equal(t, "custom-scheduler", cluster.deviceSets[0].SchedulerName) - - // Verify that the PVC has the expected generated name with the default of "data" in the name - pvcs, err := clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 1, len(pvcs.Items)) - expectedName := claim.Name - if !setTemplateName { - expectedName = "data" - } - assert.Equal(t, fmt.Sprintf("mydata-%s-0", expectedName), pvcs.Items[0].GenerateName) - assert.Equal(t, cluster.clusterInfo.Namespace, pvcs.Items[0].Namespace) -} - -func TestPrepareDeviceSetWithHolesInPVCs(t *testing.T) { - ctx := context.TODO() - clientset := testexec.New(t, 1) - context := &clusterd.Context{ - Clientset: clientset, - } - - deviceSet := cephv1.StorageClassDeviceSet{ - Name: "mydata", - Count: 1, - Portable: true, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{testVolumeClaim("data"), testVolumeClaim("metadata"), testVolumeClaim("wal")}, - SchedulerName: "custom-scheduler", - } - spec := cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{StorageClassDeviceSets: []cephv1.StorageClassDeviceSet{deviceSet}}, - } - ns := "testns" - cluster := &Cluster{ - context: context, - clusterInfo: client.AdminClusterInfo(ns), - spec: spec, - } - - pvcSuffix := 0 - var pvcReactor k8stesting.ReactionFunc = func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { - // PVCs are created with generateName used, and we need to capture the create calls and - // generate a name for them in order for PVCs to all have unique names. - createAction, ok := action.(k8stesting.CreateAction) - if !ok { - t.Fatal("err! action is not a create action") - return false, nil, nil - } - obj := createAction.GetObject() - pvc, ok := obj.(*corev1.PersistentVolumeClaim) - if !ok { - t.Fatal("err! 
action not a PVC") - return false, nil, nil - } - if pvc.Name == "" { - pvc.Name = fmt.Sprintf("%s-%d", pvc.GenerateName, pvcSuffix) - logger.Info("generated name for PVC:", pvc.Name) - pvcSuffix++ - } else { - logger.Info("PVC already has a name:", pvc.Name) - } - // setting pvc.Name above modifies the action in-place before future reactors occur - // we want the default reactor to create the resource, so return false as if we did nothing - return false, nil, nil - } - clientset.PrependReactor("create", "persistentvolumeclaims", pvcReactor) - - // Create 3 PVCs for two OSDs in the device set - config := newProvisionErrors() - cluster.prepareStorageClassDeviceSets(config) - assert.Equal(t, 1, len(cluster.deviceSets)) - assert.Equal(t, 0, config.len()) - assert.Equal(t, "mydata", cluster.deviceSets[0].Name) - assert.True(t, cluster.deviceSets[0].Portable) - _, dataOK := cluster.deviceSets[0].PVCSources["data"] - assert.True(t, dataOK) - - // Verify the PVCs all exist - pvcs, err := clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 3, len(pvcs.Items)) - assertPVCExists(t, clientset, ns, "mydata-data-0-0") - assertPVCExists(t, clientset, ns, "mydata-metadata-0-1") - assertPVCExists(t, clientset, ns, "mydata-wal-0-2") - - // Create 3 more PVCs (6 total) for two OSDs in the device set - cluster.spec.Storage.StorageClassDeviceSets[0].Count = 2 - cluster.prepareStorageClassDeviceSets(config) - assert.Equal(t, 2, len(cluster.deviceSets)) - assert.Equal(t, 0, config.len()) - - // Verify the PVCs all exist - pvcs, err = clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 6, len(pvcs.Items)) - assertPVCExists(t, clientset, ns, "mydata-data-0-0") - assertPVCExists(t, clientset, ns, "mydata-metadata-0-1") - assertPVCExists(t, clientset, ns, "mydata-wal-0-2") - assertPVCExists(t, clientset, ns, "mydata-data-1-3") - assertPVCExists(t, clientset, ns, "mydata-metadata-1-4") - assertPVCExists(t, clientset, ns, "mydata-wal-1-5") - - // Verify the same number of PVCs exist after calling the reconcile again on the PVCs - cluster.prepareStorageClassDeviceSets(config) - assert.Equal(t, 2, len(cluster.deviceSets)) - pvcs, err = clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 6, len(pvcs.Items)) - - // Delete a single PVC and verify it will be re-created - err = clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).Delete(ctx, "mydata-wal-0-2", metav1.DeleteOptions{}) - assert.NoError(t, err) - cluster.prepareStorageClassDeviceSets(config) - assert.Equal(t, 2, len(cluster.deviceSets)) - pvcs, err = clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 6, len(pvcs.Items)) - - // Delete the PVCs for an OSD and verify it will not be re-created if the count is reduced - cluster.spec.Storage.StorageClassDeviceSets[0].Count = 1 - err = clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).Delete(ctx, "mydata-data-0-0", metav1.DeleteOptions{}) - assert.NoError(t, err) - err = clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).Delete(ctx, "mydata-metadata-0-1", metav1.DeleteOptions{}) - assert.NoError(t, err) - err = 
clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).Delete(ctx, "mydata-wal-0-6", metav1.DeleteOptions{}) - assert.NoError(t, err) - cluster.prepareStorageClassDeviceSets(config) - assert.Equal(t, 1, len(cluster.deviceSets)) - pvcs, err = clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 3, len(pvcs.Items)) - - // Scale back up to a count of two and confirm that a new index is used for the PVCs - cluster.spec.Storage.StorageClassDeviceSets[0].Count = 2 - cluster.prepareStorageClassDeviceSets(config) - assert.Equal(t, 2, len(cluster.deviceSets)) - pvcs, err = clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 6, len(pvcs.Items)) - assertPVCExists(t, clientset, ns, "mydata-data-1-3") - assertPVCExists(t, clientset, ns, "mydata-metadata-1-4") - assertPVCExists(t, clientset, ns, "mydata-wal-1-5") - assertPVCExists(t, clientset, ns, "mydata-data-2-7") - assertPVCExists(t, clientset, ns, "mydata-metadata-2-8") - assertPVCExists(t, clientset, ns, "mydata-wal-2-9") -} - -func assertPVCExists(t *testing.T, clientset kubernetes.Interface, namespace, name string) { - pvc, err := clientset.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, pvc) -} - -func testVolumeClaim(name string) corev1.PersistentVolumeClaim { - storageClass := "mysource" - claim := corev1.PersistentVolumeClaim{Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: &storageClass, - }} - claim.Name = name - return claim -} - -func TestPrepareDeviceSetsWithCrushParams(t *testing.T) { - ctx := context.TODO() - clientset := testexec.New(t, 1) - context := &clusterd.Context{ - Clientset: clientset, - } - deviceSet := cephv1.StorageClassDeviceSet{ - Name: "datawithcrushparams1", - Count: 1, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{testVolumeClaim("testwithcrushparams1")}, - SchedulerName: "custom-scheduler", - } - deviceSet.VolumeClaimTemplates[0].Annotations = map[string]string{ - "crushDeviceClass": "ssd", - "crushInitialWeight": "0.75", - "crushPrimaryAffinity": "0.666", - } - - spec := cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{StorageClassDeviceSets: []cephv1.StorageClassDeviceSet{deviceSet}}, - } - cluster := &Cluster{ - context: context, - clusterInfo: client.AdminClusterInfo("testns"), - spec: spec, - } - - config := newProvisionErrors() - cluster.prepareStorageClassDeviceSets(config) - assert.Equal(t, 1, len(cluster.deviceSets)) - assert.Equal(t, cluster.deviceSets[0].CrushDeviceClass, "ssd") - assert.Equal(t, cluster.deviceSets[0].CrushInitialWeight, "0.75") - assert.Equal(t, cluster.deviceSets[0].CrushPrimaryAffinity, "0.666") - - pvcs, err := clientset.CoreV1().PersistentVolumeClaims(cluster.clusterInfo.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Equal(t, 1, len(pvcs.Items)) -} diff --git a/pkg/operator/ceph/cluster/osd/envs.go b/pkg/operator/ceph/cluster/osd/envs.go deleted file mode 100644 index 11b714d88..000000000 --- a/pkg/operator/ceph/cluster/osd/envs.go +++ /dev/null @@ -1,235 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "strconv" - - "github.com/rook/rook/pkg/daemon/ceph/client" - kms "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - opmon "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/k8sutil" - "gopkg.in/ini.v1" - v1 "k8s.io/api/core/v1" -) - -const ( - osdDatabaseSizeEnvVarName = "ROOK_OSD_DATABASE_SIZE" - osdWalSizeEnvVarName = "ROOK_OSD_WAL_SIZE" - osdsPerDeviceEnvVarName = "ROOK_OSDS_PER_DEVICE" - osdDeviceClassEnvVarName = "ROOK_OSD_DEVICE_CLASS" - // EncryptedDeviceEnvVarName is used in the pod spec to indicate whether the OSD is encrypted or not - EncryptedDeviceEnvVarName = "ROOK_ENCRYPTED_DEVICE" - PVCNameEnvVarName = "ROOK_PVC_NAME" - // CephVolumeEncryptedKeyEnvVarName is the env variable used by ceph-volume to encrypt the OSD (raw mode) - // Hardcoded in ceph-volume do NOT touch - CephVolumeEncryptedKeyEnvVarName = "CEPH_VOLUME_DMCRYPT_SECRET" - osdMetadataDeviceEnvVarName = "ROOK_METADATA_DEVICE" - osdWalDeviceEnvVarName = "ROOK_WAL_DEVICE" - // PVCBackedOSDVarName indicates whether the OSD is on PVC ("true") or not ("false") - PVCBackedOSDVarName = "ROOK_PVC_BACKED_OSD" - blockPathVarName = "ROOK_BLOCK_PATH" - cvModeVarName = "ROOK_CV_MODE" - lvBackedPVVarName = "ROOK_LV_BACKED_PV" - CrushDeviceClassVarName = "ROOK_OSD_CRUSH_DEVICE_CLASS" - CrushInitialWeightVarName = "ROOK_OSD_CRUSH_INITIAL_WEIGHT" - CrushRootVarName = "ROOK_CRUSHMAP_ROOT" - tcmallocMaxTotalThreadCacheBytesEnv = "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES" -) - -var ( - cephEnvConfigFile = "/etc/sysconfig/ceph" -) - -func (c *Cluster) getConfigEnvVars(osdProps osdProperties, dataDir string) []v1.EnvVar { - envVars := []v1.EnvVar{ - nodeNameEnvVar(osdProps.crushHostname), - {Name: "ROOK_CLUSTER_ID", Value: string(c.clusterInfo.OwnerInfo.GetUID())}, - {Name: "ROOK_CLUSTER_NAME", Value: string(c.clusterInfo.NamespacedName().Name)}, - k8sutil.PodIPEnvVar(k8sutil.PrivateIPEnvVar), - k8sutil.PodIPEnvVar(k8sutil.PublicIPEnvVar), - opmon.PodNamespaceEnvVar(c.clusterInfo.Namespace), - opmon.EndpointEnvVar(), - opmon.SecretEnvVar(), - opmon.CephUsernameEnvVar(), - opmon.CephSecretEnvVar(), - k8sutil.ConfigDirEnvVar(dataDir), - k8sutil.ConfigOverrideEnvVar(), - {Name: "ROOK_FSID", ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "rook-ceph-mon"}, - Key: "fsid", - }, - }}, - k8sutil.NodeEnvVar(), - {Name: CrushRootVarName, Value: client.GetCrushRootFromSpec(&c.spec)}, - } - - // Give a hint to the prepare pod for what the host in the CRUSH map should be - crushmapHostname := osdProps.crushHostname - if !osdProps.portable && osdProps.onPVC() { - // If it's a pvc that's not portable we only know what the host name should be when inside the osd prepare pod - crushmapHostname = "" - } - envVars = append(envVars, v1.EnvVar{Name: "ROOK_CRUSHMAP_HOSTNAME", Value: crushmapHostname}) - - // Append ceph-volume environment variables - envVars = append(envVars, cephVolumeEnvVar()...) 
- - if osdProps.storeConfig.DatabaseSizeMB != 0 { - envVars = append(envVars, v1.EnvVar{Name: osdDatabaseSizeEnvVarName, Value: strconv.Itoa(osdProps.storeConfig.DatabaseSizeMB)}) - } - - if osdProps.storeConfig.WalSizeMB != 0 { - envVars = append(envVars, v1.EnvVar{Name: osdWalSizeEnvVarName, Value: strconv.Itoa(osdProps.storeConfig.WalSizeMB)}) - } - - if osdProps.storeConfig.OSDsPerDevice != 0 { - envVars = append(envVars, v1.EnvVar{Name: osdsPerDeviceEnvVarName, Value: strconv.Itoa(osdProps.storeConfig.OSDsPerDevice)}) - } - - if osdProps.storeConfig.EncryptedDevice { - envVars = append(envVars, v1.EnvVar{Name: EncryptedDeviceEnvVarName, Value: "true"}) - } - - return envVars -} - -func nodeNameEnvVar(name string) v1.EnvVar { - return v1.EnvVar{Name: "ROOK_NODE_NAME", Value: name} -} - -func dataDevicesEnvVar(dataDevices string) v1.EnvVar { - return v1.EnvVar{Name: "ROOK_DATA_DEVICES", Value: dataDevices} -} - -func deviceFilterEnvVar(filter string) v1.EnvVar { - return v1.EnvVar{Name: "ROOK_DATA_DEVICE_FILTER", Value: filter} -} - -func devicePathFilterEnvVar(filter string) v1.EnvVar { - return v1.EnvVar{Name: "ROOK_DATA_DEVICE_PATH_FILTER", Value: filter} -} - -func dataDeviceClassEnvVar(deviceClass string) v1.EnvVar { - return v1.EnvVar{Name: osdDeviceClassEnvVarName, Value: deviceClass} -} - -func metadataDeviceEnvVar(metadataDevice string) v1.EnvVar { - return v1.EnvVar{Name: osdMetadataDeviceEnvVarName, Value: metadataDevice} -} - -func walDeviceEnvVar(walDevice string) v1.EnvVar { - return v1.EnvVar{Name: osdWalDeviceEnvVarName, Value: walDevice} -} - -func pvcBackedOSDEnvVar(pvcBacked string) v1.EnvVar { - return v1.EnvVar{Name: PVCBackedOSDVarName, Value: pvcBacked} -} - -func setDebugLogLevelEnvVar(debug bool) v1.EnvVar { - level := "INFO" - if debug { - level = "DEBUG" - } - return v1.EnvVar{Name: "ROOK_LOG_LEVEL", Value: level} -} - -func blockPathEnvVariable(lvPath string) v1.EnvVar { - return v1.EnvVar{Name: blockPathVarName, Value: lvPath} -} - -func cvModeEnvVariable(cvMode string) v1.EnvVar { - return v1.EnvVar{Name: cvModeVarName, Value: cvMode} -} - -func lvBackedPVEnvVar(lvBackedPV string) v1.EnvVar { - return v1.EnvVar{Name: lvBackedPVVarName, Value: lvBackedPV} -} - -func crushDeviceClassEnvVar(crushDeviceClass string) v1.EnvVar { - return v1.EnvVar{Name: CrushDeviceClassVarName, Value: crushDeviceClass} -} - -func crushInitialWeightEnvVar(crushInitialWeight string) v1.EnvVar { - return v1.EnvVar{Name: CrushInitialWeightVarName, Value: crushInitialWeight} -} - -func encryptedDeviceEnvVar(encryptedDevice bool) v1.EnvVar { - return v1.EnvVar{Name: EncryptedDeviceEnvVarName, Value: strconv.FormatBool(encryptedDevice)} -} -func pvcNameEnvVar(pvcName string) v1.EnvVar { - return v1.EnvVar{Name: PVCNameEnvVarName, Value: pvcName} -} - -func cephVolumeRawEncryptedEnvVarFromSecret(osdProps osdProperties) v1.EnvVar { - return v1.EnvVar{ - Name: CephVolumeEncryptedKeyEnvVarName, - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: kms.GenerateOSDEncryptionSecretName(osdProps.pvc.ClaimName), - }, - Key: kms.OsdEncryptionSecretNameKeyName, - }, - }, - } -} - -func cephVolumeEnvVar() []v1.EnvVar { - return []v1.EnvVar{ - {Name: "CEPH_VOLUME_DEBUG", Value: "1"}, - {Name: "CEPH_VOLUME_SKIP_RESTORECON", Value: "1"}, - // LVM will avoid interaction with udev. - // LVM will manage the relevant nodes in /dev directly. 
- {Name: "DM_DISABLE_UDEV", Value: "1"}, - } -} - -func osdActivateEnvVar() []v1.EnvVar { - monEnvVars := []v1.EnvVar{ - {Name: "ROOK_CEPH_MON_HOST", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{ - Name: "rook-ceph-config"}, - Key: "mon_host"}}}, - {Name: "CEPH_ARGS", Value: "-m $(ROOK_CEPH_MON_HOST)"}, - } - - return append(cephVolumeEnvVar(), monEnvVars...) -} - -func getTcmallocMaxTotalThreadCacheBytes(tcmallocMaxTotalThreadCacheBytes string) v1.EnvVar { - var value string - // If empty we read the default value from the file coming with the package - if tcmallocMaxTotalThreadCacheBytes == "" { - value = getTcmallocMaxTotalThreadCacheBytesFromFile() - } else { - value = tcmallocMaxTotalThreadCacheBytes - } - - return v1.EnvVar{Name: tcmallocMaxTotalThreadCacheBytesEnv, Value: value} -} - -func getTcmallocMaxTotalThreadCacheBytesFromFile() string { - iniCephEnvConfigFile, err := ini.Load(cephEnvConfigFile) - if err != nil { - return "" - } - - return iniCephEnvConfigFile.Section("").Key(tcmallocMaxTotalThreadCacheBytesEnv).String() -} diff --git a/pkg/operator/ceph/cluster/osd/envs_test.go b/pkg/operator/ceph/cluster/osd/envs_test.go deleted file mode 100644 index 600935c83..000000000 --- a/pkg/operator/ceph/cluster/osd/envs_test.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -var ( - sysconfig = []byte(`# /etc/sysconfig/ceph -# -# Environment file for ceph daemon systemd unit files. -# - -# Increase tcmalloc cache size -TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 - -## automatically restart systemd units on upgrade -# -# By default, it is left to the administrator to restart -# ceph daemons (or their related systemd units) manually -# when the 'ceph' package is upgraded. By setting this -# parameter to "yes", package upgrade will trigger a -# "systemctl try-restart" on all the ceph systemd units -# currently active on the node. 
-# -CEPH_AUTO_RESTART_ON_UPGRADE=no`) -) - -func TestCephVolumeEnvVar(t *testing.T) { - cvEnv := cephVolumeEnvVar() - assert.Equal(t, "CEPH_VOLUME_DEBUG", cvEnv[0].Name) - assert.Equal(t, "1", cvEnv[0].Value) - assert.Equal(t, "CEPH_VOLUME_SKIP_RESTORECON", cvEnv[1].Name) - assert.Equal(t, "1", cvEnv[1].Value) - assert.Equal(t, "DM_DISABLE_UDEV", cvEnv[2].Name) - assert.Equal(t, "1", cvEnv[1].Value) -} - -func TestOsdActivateEnvVar(t *testing.T) { - osdActivateEnv := osdActivateEnvVar() - assert.Equal(t, 5, len(osdActivateEnv)) - assert.Equal(t, "CEPH_VOLUME_DEBUG", osdActivateEnv[0].Name) - assert.Equal(t, "1", osdActivateEnv[0].Value) - assert.Equal(t, "CEPH_VOLUME_SKIP_RESTORECON", osdActivateEnv[1].Name) - assert.Equal(t, "1", osdActivateEnv[1].Value) - assert.Equal(t, "DM_DISABLE_UDEV", osdActivateEnv[2].Name) - assert.Equal(t, "1", osdActivateEnv[1].Value) - assert.Equal(t, "ROOK_CEPH_MON_HOST", osdActivateEnv[3].Name) - assert.Equal(t, "CEPH_ARGS", osdActivateEnv[4].Name) - assert.Equal(t, "-m $(ROOK_CEPH_MON_HOST)", osdActivateEnv[4].Value) -} - -func TestGetTcmallocMaxTotalThreadCacheBytes(t *testing.T) { - // No file, nothing - v := getTcmallocMaxTotalThreadCacheBytes("") - assert.Equal(t, "", v.Value) - - // File and arg are empty so we can an empty value - file, err := ioutil.TempFile("", "") - assert.NoError(t, err) - defer os.Remove(file.Name()) - cephEnvConfigFile = file.Name() - v = getTcmallocMaxTotalThreadCacheBytes("") - assert.Equal(t, "", v.Value) - - // Arg is not empty - v = getTcmallocMaxTotalThreadCacheBytes("67108864") - assert.Equal(t, "67108864", v.Value) - - // Read the file now - err = ioutil.WriteFile(file.Name(), sysconfig, 0444) - assert.NoError(t, err) - v = getTcmallocMaxTotalThreadCacheBytes("") - assert.Equal(t, "134217728", v.Value) -} diff --git a/pkg/operator/ceph/cluster/osd/health.go b/pkg/operator/ceph/cluster/osd/health.go deleted file mode 100644 index f1cf93df0..000000000 --- a/pkg/operator/ceph/cluster/osd/health.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "context" - "fmt" - "reflect" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "github.com/rook/rook/pkg/operator/k8sutil" - kerrors "k8s.io/apimachinery/pkg/api/errors" -) - -const ( - upStatus = 1 - inStatus = 1 - graceTime = 60 * time.Minute -) - -var ( - defaultHealthCheckInterval = 60 * time.Second -) - -// OSDHealthMonitor defines OSD process monitoring -type OSDHealthMonitor struct { - context *clusterd.Context - clusterInfo *client.ClusterInfo - removeOSDsIfOUTAndSafeToRemove bool - interval *time.Duration -} - -// NewOSDHealthMonitor instantiates OSD monitoring -func NewOSDHealthMonitor(context *clusterd.Context, clusterInfo *client.ClusterInfo, removeOSDsIfOUTAndSafeToRemove bool, healthCheck cephv1.CephClusterHealthCheckSpec) *OSDHealthMonitor { - h := &OSDHealthMonitor{ - context: context, - clusterInfo: clusterInfo, - removeOSDsIfOUTAndSafeToRemove: removeOSDsIfOUTAndSafeToRemove, - interval: &defaultHealthCheckInterval, - } - - // allow overriding the check interval - checkInterval := healthCheck.DaemonHealth.ObjectStorageDaemon.Interval - if checkInterval != nil { - logger.Infof("ceph osd status in namespace %q check interval %q", h.clusterInfo.Namespace, checkInterval.Duration.String()) - h.interval = &checkInterval.Duration - } - - return h -} - -// Start runs monitoring logic for osds status at set intervals -func (m *OSDHealthMonitor) Start(stopCh chan struct{}) { - - for { - select { - case <-time.After(*m.interval): - logger.Debug("checking osd processes status.") - m.checkOSDHealth() - - case <-stopCh: - logger.Infof("Stopping monitoring of OSDs in namespace %q", m.clusterInfo.Namespace) - return - } - } -} - -// Update updates the removeOSDsIfOUTAndSafeToRemove -func (m *OSDHealthMonitor) Update(removeOSDsIfOUTAndSafeToRemove bool) { - m.removeOSDsIfOUTAndSafeToRemove = removeOSDsIfOUTAndSafeToRemove -} - -// checkOSDHealth takes action when needed if the OSDs are not healthy -func (m *OSDHealthMonitor) checkOSDHealth() { - err := m.checkOSDDump() - if err != nil { - logger.Debugf("failed to check OSD Dump. %v", err) - } - err = m.checkDeviceClasses() - if err != nil { - logger.Debugf("failed to check device classes. %v", err) - } -} - -func (m *OSDHealthMonitor) checkDeviceClasses() error { - devices, err := client.GetDeviceClasses(m.context, m.clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get osd device classes") - } - - if len(devices) > 0 { - m.updateCephStatus(devices) - } - - return nil -} - -func (m *OSDHealthMonitor) checkOSDDump() error { - osdDump, err := client.GetOSDDump(m.context, m.clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get osd dump") - } - - for _, osdStatus := range osdDump.OSDs { - id64, err := osdStatus.OSD.Int64() - if err != nil { - continue - } - id := int(id64) - - logger.Debugf("validating status of osd.%d", id) - - status, in, err := osdDump.StatusByID(int64(id)) - if err != nil { - return err - } - - if status == upStatus { - logger.Debugf("osd.%d is healthy.", id) - continue - } - - logger.Debugf("osd.%d is marked 'DOWN'", id) - - if in != inStatus { - logger.Debugf("osd.%d is marked 'OUT'", id) - if m.removeOSDsIfOUTAndSafeToRemove { - if err := m.removeOSDDeploymentIfSafeToDestroy(id); err != nil { - logger.Errorf("error handling marked out osd osd.%d. 
%v", id, err) - } - } - } - } - - return nil -} - -func (m *OSDHealthMonitor) removeOSDDeploymentIfSafeToDestroy(outOSDid int) error { - label := fmt.Sprintf("ceph-osd-id=%d", outOSDid) - dp, err := k8sutil.GetDeployments(m.context.Clientset, m.clusterInfo.Namespace, label) - if err != nil { - if kerrors.IsNotFound(err) { - return nil - } - return errors.Wrapf(err, "failed to get osd deployment of osd id %d", outOSDid) - } - if len(dp.Items) != 0 { - safeToDestroyOSD, err := client.OsdSafeToDestroy(m.context, m.clusterInfo, outOSDid) - if err != nil { - return errors.Wrapf(err, "failed to get osd deployment of osd id %d", outOSDid) - } - - if safeToDestroyOSD { - podCreationTimestamp := dp.Items[0].GetCreationTimestamp() - podDeletionTimeStamp := podCreationTimestamp.Add(graceTime) - currentTime := time.Now().UTC() - if podDeletionTimeStamp.Before(currentTime) { - logger.Infof("osd.%d is 'safe-to-destroy'. removing the osd deployment.", outOSDid) - if err := k8sutil.DeleteDeployment(m.context.Clientset, dp.Items[0].Namespace, dp.Items[0].Name); err != nil { - return errors.Wrapf(err, "failed to delete osd deployment %s", dp.Items[0].Name) - } - } - } - } - return nil -} - -// updateCephStorage updates the CR with deviceclass details -func (m *OSDHealthMonitor) updateCephStatus(devices []string) { - cephCluster := &cephv1.CephCluster{} - cephClusterStorage := cephv1.CephStorage{} - - for _, device := range devices { - cephClusterStorage.DeviceClasses = append(cephClusterStorage.DeviceClasses, cephv1.DeviceClasses{Name: device}) - } - err := m.context.Client.Get(context.TODO(), m.clusterInfo.NamespacedName(), cephCluster) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephCluster resource not found. Ignoring since object must be deleted.") - return - } - logger.Errorf("failed to retrieve ceph cluster %q to update ceph Storage. %v", m.clusterInfo.NamespacedName().Name, err) - return - } - if !reflect.DeepEqual(cephCluster.Status.CephStorage, &cephClusterStorage) { - cephCluster.Status.CephStorage = &cephClusterStorage - if err := reporting.UpdateStatus(m.context.Client, cephCluster); err != nil { - logger.Errorf("failed to update cluster %q Storage. %v", m.clusterInfo.NamespacedName().Name, err) - return - } - } -} diff --git a/pkg/operator/ceph/cluster/osd/health_test.go b/pkg/operator/ceph/cluster/osd/health_test.go deleted file mode 100644 index cb3f184ec..000000000 --- a/pkg/operator/ceph/cluster/osd/health_test.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package osd - -import ( - "context" - "fmt" - "reflect" - "testing" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - testexec "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - apps "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestOSDHealthCheck(t *testing.T) { - ctx := context.TODO() - clientset := testexec.New(t, 2) - clusterInfo := client.AdminClusterInfo("fake") - - var execCount = 0 - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("ExecuteCommandWithOutputFile: %s %v", command, args) - execCount++ - if args[1] == "dump" { - // Mock executor for OSD Dump command, returning an osd in Down state - return `{"OSDs": [{"OSD": 0, "Up": 0, "In": 0}]}`, nil - } else if args[1] == "safe-to-destroy" { - // Mock executor for OSD Dump command, returning an osd in Down state - return `{"safe_to_destroy":[0],"active":[],"missing_stats":[],"stored_pgs":[]}`, nil - } else if args[0] == "auth" && args[1] == "get-or-create-key" { - return "{\"key\":\"mysecurekey\", \"osdid\":3.0}", nil - } - return "", nil - }, - } - - // Setting up objects needed to create OSD - context := &clusterd.Context{ - Executor: executor, - Clientset: clientset, - } - - labels := map[string]string{ - k8sutil.AppAttr: AppName, - k8sutil.ClusterAttr: clusterInfo.Namespace, - OsdIdLabelKey: "0", - } - - deployment := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "osd0", - Namespace: clusterInfo.Namespace, - Labels: labels, - }, - } - if _, err := context.Clientset.AppsV1().Deployments(clusterInfo.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil { - logger.Errorf("Error creating fake deployment: %v", err) - } - - // Check if the osd deployment is created - dp, _ := context.Clientset.AppsV1().Deployments(clusterInfo.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%v=%d", OsdIdLabelKey, 0)}) - assert.Equal(t, 1, len(dp.Items)) - - // Initializing an OSD monitoring - osdMon := NewOSDHealthMonitor(context, clusterInfo, true, cephv1.CephClusterHealthCheckSpec{}) - - // Run OSD monitoring routine - err := osdMon.checkOSDDump() - assert.Nil(t, err) - // After creating an OSD, the dump has 1 mocked cmd and safe to destroy has 1 mocked cmd - assert.Equal(t, 2, execCount) - - // Check if the osd deployment was deleted - dp, _ = context.Clientset.AppsV1().Deployments(clusterInfo.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%v=%d", OsdIdLabelKey, 0)}) - assert.Equal(t, 0, len(dp.Items)) -} - -func TestMonitorStart(t *testing.T) { - stopCh := make(chan struct{}) - osdMon := NewOSDHealthMonitor(&clusterd.Context{}, client.AdminClusterInfo("ns"), true, cephv1.CephClusterHealthCheckSpec{}) - logger.Infof("starting osd monitor") - go osdMon.Start(stopCh) - close(stopCh) -} - -func TestNewOSDHealthMonitor(t *testing.T) { - clusterInfo := client.AdminClusterInfo("test") - c := &clusterd.Context{} - time10s, _ := time.ParseDuration("10s") - type args struct { - context *clusterd.Context - removeOSDsIfOUTAndSafeToRemove bool - healthCheck 
cephv1.CephClusterHealthCheckSpec - } - tests := []struct { - name string - args args - want *OSDHealthMonitor - }{ - {"default-interval", args{c, false, cephv1.CephClusterHealthCheckSpec{}}, &OSDHealthMonitor{c, clusterInfo, false, &defaultHealthCheckInterval}}, - {"10s-interval", args{c, false, cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{ObjectStorageDaemon: cephv1.HealthCheckSpec{Interval: &metav1.Duration{Duration: time10s}}}}}, &OSDHealthMonitor{c, clusterInfo, false, &time10s}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := NewOSDHealthMonitor(tt.args.context, clusterInfo, tt.args.removeOSDsIfOUTAndSafeToRemove, tt.args.healthCheck); !reflect.DeepEqual(got, tt.want) { - t.Errorf("NewOSDHealthMonitor() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestDeviceClasses(t *testing.T) { - clusterInfo := client.AdminClusterInfo("fake") - clusterInfo.SetName("rook-ceph") - - var execCount = 0 - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - return "{\"key\":\"mysecurekey\", \"osdid\":3.0}", nil - }, - } - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("ExecuteCommandWithOutputFile: %s %v", command, args) - execCount++ - if args[1] == "crush" && args[2] == "class" && args[3] == "ls" { - // Mock executor for OSD crush class list command, returning ssd as available device class - return `["ssd"]`, nil - } - return "", nil - } - - cephCluster := &cephv1.CephCluster{} - // Objects to track in the fake client. - object := []runtime.Object{ - cephCluster, - } - s := scheme.Scheme - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - context := &clusterd.Context{ - Executor: executor, - Client: cl, - } - - // Initializing an OSD monitoring - osdMon := NewOSDHealthMonitor(context, clusterInfo, true, cephv1.CephClusterHealthCheckSpec{}) - - // Run OSD monitoring routine - err := osdMon.checkDeviceClasses() - assert.Nil(t, err) - // checkDeviceClasses has 1 mocked cmd for fetching the device classes - assert.Equal(t, 1, execCount) -} diff --git a/pkg/operator/ceph/cluster/osd/integration_test.go b/pkg/operator/ceph/cluster/osd/integration_test.go deleted file mode 100644 index bf0fd34ea..000000000 --- a/pkg/operator/ceph/cluster/osd/integration_test.go +++ /dev/null @@ -1,683 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
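TestDeviceClasses above mocks `ceph osd crush class ls` to return `["ssd"]`. A rough sketch of the parsing step that mocked output implies is shown below; this is an assumption about the shape of the helper, not the actual rook client.GetDeviceClasses implementation:

package main

import (
	"encoding/json"
	"fmt"
)

// deviceClassesFromOutput parses the JSON array printed by
// `ceph osd crush class ls`, e.g. `["hdd","ssd"]`, into a string slice.
func deviceClassesFromOutput(out string) ([]string, error) {
	var classes []string
	if err := json.Unmarshal([]byte(out), &classes); err != nil {
		return nil, fmt.Errorf("failed to parse device class output %q: %w", out, err)
	}
	return classes, nil
}

func main() {
	classes, err := deviceClassesFromOutput(`["ssd"]`)
	fmt.Println(classes, err) // [ssd] <nil>
}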
-*/ - -package osd - -import ( - "context" - "encoding/json" - "fmt" - "os" - "strconv" - "strings" - "testing" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephclientfake "github.com/rook/rook/pkg/daemon/ceph/client/fake" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "github.com/tevino/abool" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - k8stesting "k8s.io/client-go/testing" -) - -var ( - // global to allow creating helper functions that are not inline with test functions - testIDGenerator osdIDGenerator -) - -// TODO: look into: failed to calculate diff between current deployment and newly generated one. -// Failed to generate strategic merge patch: map: map[] does not contain declared merge key: uid -// appears in unit tests, but does it appear in real-world? - -// Test integration between OSD creates and updates including on both nodes and PVCs. -// The definition for this test is a wrapper for the test function that adds a timeout. -func TestOSDIntegration(t *testing.T) { - oldLogger := *logger - defer func() { logger = &oldLogger }() // reset logger to default after this test - logger.SetLevel(capnslog.TRACE) // want more log info for this test if it fails - - oldOpportunisticDuration := osdOpportunisticUpdateDuration - oldMinuteDuration := minuteTickerDuration - defer func() { - osdOpportunisticUpdateDuration = oldOpportunisticDuration - minuteTickerDuration = oldMinuteDuration - }() - // lower the check durations for unit tests to speed them up - osdOpportunisticUpdateDuration = 1 * time.Millisecond - minuteTickerDuration = 3 * time.Millisecond - - done := make(chan bool) - // runs in less than 650ms on 6-core, 16MB RAM system, but github CI can be much slower - timeout := time.After(20 * 750 * time.Millisecond) - - go func() { - // use defer because t.Fatal will kill this goroutine, and we always want done set if the - // test func stops running - defer func() { done <- true }() - // run the actual test - testOSDIntegration(t) - }() - - select { - case <-timeout: - t.Fatal("Test timed out. This is a test failure.") - case <-done: - } -} - -// This is the actual test. If it hangs, we should consider that an error. -func testOSDIntegration(t *testing.T) { - ctx := context.TODO() - namespace := "osd-integration" - clusterName := "my-cluster" - - testIDGenerator = newOSDIDGenerator() - - // mock/stub functions as needed - oldConditionExportFunc := updateConditionFunc - defer func() { - updateConditionFunc = oldConditionExportFunc - }() - // stub out the conditionExportFunc to do nothing. we do not have a fake Rook interface that - // allows us to interact with a CephCluster resource like the fake K8s clientset. 
- updateConditionFunc = func(c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) { - // do nothing - } - - // set up a fake k8s client set and watcher to generate events that the operator will listen to - clientset := test.NewComplexClientset(t) - test.AddSomeReadyNodes(t, clientset, 3) - assignPodToNode := true - test.PrependComplexJobReactor(t, clientset, assignPodToNode) - test.SetFakeKubernetesVersion(clientset, "v1.13.2") // v1.13 or higher is required for OSDs on PVC - - os.Setenv(k8sutil.PodNamespaceEnvVar, namespace) - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - statusMapWatcher := watch.NewRaceFreeFake() - clientset.PrependWatchReactor("configmaps", k8stesting.DefaultWatchReactor(statusMapWatcher, nil)) - - failCreatingDeployments := []string{} - failUpdatingDeployments := []string{} - deploymentGeneration := int64(1) // mock deployment generation constantly increasing - deploymentsCreated := []string{} - deploymentsUpdated := []string{} - var deploymentReactor k8stesting.ReactionFunc = func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { - switch action := action.(type) { - case k8stesting.CreateActionImpl: - obj := action.GetObject() - d, ok := obj.(*appsv1.Deployment) - if !ok { - panic(fmt.Sprintf("object not a deployment: %+v", obj)) - } - t.Logf("deployment reactor: create event for deployment %q", d.Name) - // (1) keep track of deployments which have been created - deploymentsCreated = append(deploymentsCreated, d.Name) - // (2) set deployments ready immediately. Don't have to test waiting for deployments to - // be ready b/c that is tested thoroughly in update_test.go - d.Status.ObservedGeneration = deploymentGeneration - d.Status.UpdatedReplicas = 1 - d.Status.ReadyReplicas = 1 - deploymentGeneration++ - // (3) return a failure if asked - for _, match := range failCreatingDeployments { - if strings.Contains(d.Name, match) { - return true, nil, errors.Errorf("induced error creating deployment %q", d.Name) - } - } - - case k8stesting.UpdateActionImpl: - obj := action.GetObject() - d, ok := obj.(*appsv1.Deployment) - if !ok { - panic(fmt.Sprintf("object not a deployment: %+v", obj)) - } - t.Logf("deployment reactor: update event for deployment %q", d.Name) - // (1) keep track of deployments which have been created - deploymentsUpdated = append(deploymentsUpdated, d.Name) - // (2) set deployments ready immediately. 
Don't have to test waiting for deployments to - // be ready b/c that is tested thoroughly in update_test.go - d.Status.ObservedGeneration = deploymentGeneration - d.Status.UpdatedReplicas = 1 - d.Status.ReadyReplicas = 1 - deploymentGeneration++ - // (3) return a failure if asked - for _, match := range failUpdatingDeployments { - if strings.Contains(d.Name, match) { - return true, nil, errors.Errorf("induced error creating deployment %q", d.Name) - } - } - - case k8stesting.DeleteActionImpl: - panic(fmt.Sprintf("deployments should not be deleted: %+v", action)) - } - - // modify the object in-place so that the default reactor will create it with our - // modifications, if we have made any - return false, nil, nil - } - clientset.PrependReactor("*", "deployments", deploymentReactor) - - clusterInfo := cephclient.NewClusterInfo(namespace, clusterName) - clusterInfo.CephVersion = cephver.Pacific - clusterInfo.SetName("mycluster") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - executor := osdIntegrationTestExecutor(t, clientset, namespace) - - context := &clusterd.Context{ - Clientset: clientset, - ConfigDir: "/var/lib/rook", - Executor: executor, - RequestCancelOrchestration: abool.New(), - } - spec := cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{ - Image: "quay.io/ceph/ceph:v16.2.0", - }, - DataDirHostPath: context.ConfigDir, - // This storage spec should... (see inline) - Storage: cephv1.StorageScopeSpec{ - UseAllNodes: true, - Selection: cephv1.Selection{ - // ... create 2 osd on each of the 3 nodes (6 total) - DeviceFilter: "vd[ab]", - }, - StorageClassDeviceSets: []cephv1.StorageClassDeviceSet{ - // ... create 6 portable osds - newDummyStorageClassDeviceSet("portable-set", namespace, 6, true, "ec2"), - // ... create 3 local, non-portable osds, one on each node - newDummyStorageClassDeviceSet("local-set", namespace, 3, false, "local"), - }, - }, - } - osdsPerNode := 2 // vda and vdb - - c := New(context, clusterInfo, spec, "myversion") - - var startErr error - var done bool - runReconcile := func() { - // reset environment - c = New(context, clusterInfo, spec, "myversion") - statusMapWatcher.Reset() - - // reset counters - deploymentsCreated = []string{} - deploymentsUpdated = []string{} - done = false - - startErr = c.Start() - done = true - } - waitForDone := func() { - for { - if done == true { - return - } - time.Sleep(1 * time.Millisecond) - } - } - - // NOTE: these tests all use the same environment - t.Run("initial creation", func(t *testing.T) { - go runReconcile() - - cms := waitForNumConfigMaps(clientset, namespace, 12) // 3 nodes + 9 new PVCs - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - - waitForDone() - assert.NoError(t, startErr) - assert.Len(t, deploymentsCreated, 15) - assert.Len(t, deploymentsUpdated, 0) - }) - - t.Run("reconcile again with no changes", func(t *testing.T) { - go runReconcile() - - cms := waitForNumConfigMaps(clientset, namespace, 3) // 3 nodes - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - - waitForDone() - assert.NoError(t, startErr) - assert.Len(t, deploymentsCreated, 0) - assert.Len(t, deploymentsUpdated, 15) - }) - - t.Run("increase number of OSDs", func(t *testing.T) { - spec.Storage.Selection.DeviceFilter = "/dev/vd[abc]" // 3 more (1 more per node) - 
spec.Storage.StorageClassDeviceSets[0].Count = 8 // 2 more portable - spec.Storage.StorageClassDeviceSets[1].Count = 6 // 3 more (1 more per node) - osdsPerNode = 3 // vda, vdb, vdc - - go runReconcile() - - cms := waitForNumConfigMaps(clientset, namespace, 8) // 3 nodes + 5 new PVCs - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - - waitForDone() - assert.NoError(t, startErr) - assert.Len(t, deploymentsCreated, 8) - assert.Len(t, deploymentsUpdated, 15) - }) - - t.Run("mixed create and update, cancel reconcile, and continue reconcile", func(t *testing.T) { - spec.Storage.Selection.DeviceFilter = "/dev/vd[abcd]" // 3 more (1 more per node) - spec.Storage.StorageClassDeviceSets[0].Count = 10 // 2 more portable - osdsPerNode = 4 // vd[a-d] - - go runReconcile() - cms := waitForNumConfigMaps(clientset, namespace, 5) // 3 nodes + 2 new PVCs - i := 1 - for _, cm := range cms { - if !strings.Contains(cm.Name, "node") { - // only do node configmaps right now since those are always created and we want - // a deterministic number of configmaps in the next step - continue - } - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - if i == 2 { - t.Log("canceling orchestration") - time.Sleep(10 * time.Millisecond) - // after the second status map is made ready, cancel the orchestration. wait a short - // while to make sure the watcher picks up the updated change - c.context.RequestCancelOrchestration.Set() - break - } - i++ - } - waitForDone() - assert.Error(t, startErr) - t.Logf("c.Start() error: %+v", startErr) - // should have created 2 more OSDs for the configmaps we updated - assert.Len(t, deploymentsCreated, 2) - // we don't know exactly how many updates might have happened by this point - numUpdates := len(deploymentsUpdated) - t.Logf("deployments updated: %d", numUpdates) - - go runReconcile() - cms = waitForNumConfigMaps(clientset, namespace, 5) // 3 nodes + 2 new PVCs - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.NoError(t, startErr) - assert.Len(t, deploymentsCreated, 3) // 5 less the 2 created in the previous step - assert.Len(t, deploymentsUpdated, 25) // 23 + 2 created in previous step - }) - - t.Run("failures reported in status configmaps", func(t *testing.T) { - spec.Storage.Selection.DeviceFilter = "/dev/vd[abcde]" // 3 more (1 more per node) - osdsPerNode = 5 // vd[a-e] - - go runReconcile() - cms := waitForNumConfigMaps(clientset, namespace, 3) // 3 nodes - for _, cm := range cms { - cpy := cm.DeepCopy() - if strings.Contains(cm.Name, "node1") { - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - } else { - setStatusConfigMapToFailed(t, cpy) // fail on node0 and node2 - } - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.Error(t, startErr) - t.Logf("c.Start() error: %+v", startErr) - assert.Len(t, deploymentsCreated, 1) - assert.Len(t, deploymentsUpdated, 28) - - // should get back to healthy after - go runReconcile() - cms = waitForNumConfigMaps(clientset, namespace, 3) // 3 nodes - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.NoError(t, startErr) - assert.Len(t, 
deploymentsCreated, 2) - assert.Len(t, deploymentsUpdated, 29) // 28 + 1 created in previous step - }) - - t.Run("failures during deployment updates", func(t *testing.T) { - failUpdatingDeployments = []string{"osd-15", "osd-22"} - go runReconcile() - cms := waitForNumConfigMaps(clientset, namespace, 3) // 3 nodes - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.Error(t, startErr) - t.Logf("c.Start() error: %+v", startErr) - assert.Len(t, deploymentsCreated, 0) - assert.Len(t, deploymentsUpdated, 31) // should attempt to update all deployments - - failUpdatingDeployments = []string{} - go runReconcile() - cms = waitForNumConfigMaps(clientset, namespace, 3) // 3 nodes - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.NoError(t, startErr) - assert.Len(t, deploymentsCreated, 0) - assert.Len(t, deploymentsUpdated, 31) // all deployments should be updated again - }) - - t.Run("failures during deployment creation", func(t *testing.T) { - spec.Storage.Selection.DeviceFilter = "/dev/vd[abcdef]" // 3 more (1 more per node) - osdsPerNode = 6 // vd[a-f] - - failCreatingDeployments = []string{"osd-31", "osd-33"} - go runReconcile() - cms := waitForNumConfigMaps(clientset, namespace, 3) // 3 nodes - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.Error(t, startErr) - t.Logf("c.Start() error: %+v", startErr) - assert.Len(t, deploymentsCreated, 3) - assert.Len(t, deploymentsUpdated, 31) - - failCreatingDeployments = []string{} - go runReconcile() - cms = waitForNumConfigMaps(clientset, namespace, 3) // 3 nodes - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.NoError(t, startErr) - assert.Len(t, deploymentsCreated, 2) // the 2 deployments NOT created before should be created now - assert.Len(t, deploymentsUpdated, 32) // 31 + 1 from previous step - }) - - t.Run("failures from improperly formatted StorageClassDeviceSet", func(t *testing.T) { - newSCDS := cephv1.StorageClassDeviceSet{ - Name: "new", - Count: 3, - Portable: true, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{}, - } - spec.Storage.StorageClassDeviceSets = append(spec.Storage.StorageClassDeviceSets, newSCDS) - - go runReconcile() - cms := waitForNumConfigMaps(clientset, namespace, 3) // 3 nodes - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.Error(t, startErr) - t.Logf("c.Start() error: %+v", startErr) - assert.Len(t, deploymentsCreated, 0) - assert.Len(t, deploymentsUpdated, 34) - - spec.Storage.StorageClassDeviceSets[2].VolumeClaimTemplates = []corev1.PersistentVolumeClaim{ - newDummyPVC("data", namespace, "100Gi", "ec2"), - newDummyPVC("metadata", namespace, "10Gi", "uncle-rogers-secret-stuff"), - } - - go runReconcile() - cms = waitForNumConfigMaps(clientset, namespace, 6) // 3 nodes + 3 new PVCs - for _, cm := range cms { - cpy := cm.DeepCopy() - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, 
statusMapWatcher, cpy) - } - waitForDone() - assert.NoError(t, startErr) - assert.Len(t, deploymentsCreated, 3) - assert.Len(t, deploymentsUpdated, 34) - }) - - t.Run("clean up dangling configmaps", func(t *testing.T) { - danglingCM := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dangling-status-configmap", - Namespace: namespace, - Labels: statusConfigMapLabels("node0"), - }, - } - _, err := clientset.CoreV1().ConfigMaps(namespace).Create(ctx, danglingCM, metav1.CreateOptions{}) - assert.NoError(t, err) - - go runReconcile() - cms := waitForNumConfigMaps(clientset, namespace, 4) // 3 nodes + dangling - for _, cm := range cms { - cpy := cm.DeepCopy() - if cpy.Name == "dangling-status-configmap" { - continue - } - setStatusConfigMapToCompleted(t, cpy, osdsPerNode) - updateStatusConfigmap(clientset, statusMapWatcher, cpy) - } - waitForDone() - assert.NoError(t, err) - assert.Len(t, deploymentsCreated, 0) - assert.Len(t, deploymentsUpdated, 37) - - cmList, err := clientset.CoreV1().ConfigMaps(namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, cmList.Items, 0) - }) -} - -/* - * mock executor to handle ceph commands - */ - -func osdIntegrationTestExecutor(t *testing.T, clientset *fake.Clientset, namespace string) *exectest.MockExecutor { - return &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - t.Logf("command: %s %v", command, args) - if command != "ceph" { - return "", errors.Errorf("unexpected command %q with args %v", command, args) - } - if args[0] == "auth" { - if args[1] == "get-or-create-key" { - return "{\"key\": \"does-not-matter\"}", nil - } - } - if args[0] == "osd" { - if args[1] == "ok-to-stop" { - osdID := args[2] - id, err := strconv.Atoi(osdID) - if err != nil { - panic(err) - } - t.Logf("returning ok for OSD %d", id) - return cephclientfake.OsdOkToStopOutput(id, []int{id}, true), nil - } - if args[1] == "ls" { - // ceph osd ls returns an array of osd IDs like [0,1,2] - // build this based on the number of deployments since they should be equal - // for this test - l, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - panic(fmt.Sprintf("failed to build 'ceph osd ls' output. 
%v", err)) - } - return cephclientfake.OsdLsOutput(len(l.Items)), nil - } - if args[1] == "tree" { - return cephclientfake.OsdTreeOutput(3, 3), nil // fake output for cluster with 3 nodes having 3 OSDs - } - if args[1] == "crush" { - if args[2] == "get-device-class" { - return cephclientfake.OSDDeviceClassOutput(args[3]), nil - } - } - } - if args[0] == "versions" { - // the update deploy code only cares about the mons from the ceph version command results - v := `{"mon":{"ceph version 16.2.2 (somehash) nautilus (stable)":3}}` - return v, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - }, - } -} - -/* - * Unique and consistent OSD ID generator - */ - -type osdIDGenerator struct { - nextOSDID int - osdIDMap map[string]int -} - -func newOSDIDGenerator() osdIDGenerator { - return osdIDGenerator{ - nextOSDID: 0, - osdIDMap: map[string]int{}, - } -} - -func (g *osdIDGenerator) osdID(t *testing.T, namedResource string) int { - if id, ok := g.osdIDMap[namedResource]; ok { - t.Logf("resource %q has existing OSD ID %d", namedResource, id) - return id - } - id := g.nextOSDID - g.osdIDMap[namedResource] = id - g.nextOSDID++ - t.Logf("generated new OSD ID %d for resource %q", id, namedResource) - return id -} - -func newDummyStorageClassDeviceSet( - name string, namespace string, count int, portable bool, storageClassName string, -) cephv1.StorageClassDeviceSet { - return cephv1.StorageClassDeviceSet{ - Name: name, - Count: count, - Portable: portable, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ - newDummyPVC("data", namespace, "10Gi", storageClassName), - }, - } -} - -func waitForNumConfigMaps(clientset kubernetes.Interface, namespace string, count int) []corev1.ConfigMap { - for { - cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - panic(err) - } - if len(cms.Items) >= count { - return cms.Items - } - time.Sleep(1 * time.Microsecond) - } -} - -// Helper method to set "completed" status on "starting" ConfigMaps. -// If the configmap is a result for a provision on nodes, create numOSDsIfOnNode OSDs in the return -func setStatusConfigMapToCompleted(t *testing.T, cm *corev1.ConfigMap, numOSDsIfOnNode int) { - status := parseOrchestrationStatus(cm.Data) - t.Logf("updating configmap %q status to completed", cm.Name) - // configmap names are deterministic can be mapped indirectly to an OSD ID, and since the - // configmaps are used to report completion status of OSD provisioning, we use this property in - // these unit tests - status.Status = OrchestrationStatusCompleted - if status.PvcBackedOSD { - // only one OSD per PVC - osdID := testIDGenerator.osdID(t, cm.Name) - status.OSDs = []OSDInfo{ - { - ID: osdID, - UUID: fmt.Sprintf("%032d", osdID), - BlockPath: "/dev/path/to/block", - CVMode: "raw", - }, - } - } else { - status.OSDs = []OSDInfo{} - for i := 0; i < numOSDsIfOnNode; i++ { - // in order to generate multiple OSDs on a node, pretend they have different configmap - // names (simply append the index). this is still deterministic. 
- osdID := testIDGenerator.osdID(t, fmt.Sprintf("%s-%d", cm.Name, i)) - disk := k8sutil.IndexToName(i) - status.OSDs = append(status.OSDs, OSDInfo{ - ID: osdID, - UUID: fmt.Sprintf("%032d", osdID), - BlockPath: fmt.Sprintf("/dev/vd%s", disk), - CVMode: "raw", - }) - } - } - s, _ := json.Marshal(status) - cm.Data[orchestrationStatusKey] = string(s) -} - -func setStatusConfigMapToFailed(t *testing.T, cm *corev1.ConfigMap) { - status := parseOrchestrationStatus(cm.Data) - t.Logf("updating configmap %q status to failed", cm.Name) - status.Status = OrchestrationStatusFailed - s, _ := json.Marshal(status) - cm.Data[orchestrationStatusKey] = string(s) -} - -func updateStatusConfigmap(clientset kubernetes.Interface, statusMapWatcher *watch.RaceFreeFakeWatcher, cm *corev1.ConfigMap) { - _, err := clientset.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) - if err != nil { - panic(err) - } - statusMapWatcher.Modify(cm) -} diff --git a/pkg/operator/ceph/cluster/osd/labels.go b/pkg/operator/ceph/cluster/osd/labels.go deleted file mode 100644 index 6cdde1104..000000000 --- a/pkg/operator/ceph/cluster/osd/labels.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
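The integration test above wires a watch.RaceFreeFakeWatcher into the fake clientset so that updateStatusConfigmap can push status ConfigMap changes to the reconcile loop as watch events. A standalone sketch of that wiring, with assumed names and outside any Rook code:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	clientset := fake.NewSimpleClientset()

	// Serve every watch on configmaps from this controllable, buffered fake watcher.
	fakeWatcher := watch.NewRaceFreeFake()
	clientset.PrependWatchReactor("configmaps", k8stesting.DefaultWatchReactor(fakeWatcher, nil))

	w, err := clientset.CoreV1().ConfigMaps("ns").Watch(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	// Emit a synthetic Modified event, as updateStatusConfigmap does after updating the object.
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "status", Namespace: "ns"}}
	fakeWatcher.Modify(cm)

	ev := <-w.ResultChan()
	fmt.Println(ev.Type, ev.Object.(*corev1.ConfigMap).Name) // MODIFIED status
}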
-*/ - -package osd - -import ( - "fmt" - "strconv" - "strings" - - "github.com/rook/rook/pkg/operator/ceph/controller" -) - -const ( - // CephDeviceSetLabelKey is the Rook device set label key - CephDeviceSetLabelKey = "ceph.rook.io/DeviceSet" - // CephSetIndexLabelKey is the Rook label key index - CephSetIndexLabelKey = "ceph.rook.io/setIndex" - // CephDeviceSetPVCIDLabelKey is the Rook PVC ID label key - CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId" - // OSDOverPVCLabelKey is the Rook PVC label key - OSDOverPVCLabelKey = "ceph.rook.io/pvc" - // TopologyLocationLabel is the crush location label added to OSD deployments - TopologyLocationLabel = "topology-location-%s" -) - -func makeStorageClassDeviceSetPVCLabel(storageClassDeviceSetName, pvcStorageClassDeviceSetPVCId string, setIndex int) map[string]string { - return map[string]string{ - CephDeviceSetLabelKey: storageClassDeviceSetName, - CephSetIndexLabelKey: fmt.Sprintf("%d", setIndex), - CephDeviceSetPVCIDLabelKey: pvcStorageClassDeviceSetPVCId, - } -} - -func (c *Cluster) getOSDLabels(osd OSDInfo, failureDomainValue string, portable bool) map[string]string { - stringID := fmt.Sprintf("%d", osd.ID) - labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, "osd", stringID, true) - labels[OsdIdLabelKey] = stringID - labels[FailureDomainKey] = failureDomainValue - labels[portableKey] = strconv.FormatBool(portable) - for k, v := range getOSDTopologyLocationLabels(osd.Location) { - labels[k] = v - } - return labels -} - -func getOSDTopologyLocationLabels(topologyLocation string) map[string]string { - labels := map[string]string{} - locations := strings.Split(topologyLocation, " ") - for _, location := range locations { - loc := strings.Split(location, "=") - if len(loc) == 2 { - labels[fmt.Sprintf(TopologyLocationLabel, loc[0])] = loc[1] - } - } - return labels -} diff --git a/pkg/operator/ceph/cluster/osd/labels_test.go b/pkg/operator/ceph/cluster/osd/labels_test.go deleted file mode 100644 index 80cf31f51..000000000 --- a/pkg/operator/ceph/cluster/osd/labels_test.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOSDTopologyLabels(t *testing.T) { - fakeLocation := "root=default host=ocs-deviceset-gp2-1-data-0-wh5wl region=us-east-1 zone=us-east-1c" - result := getOSDTopologyLocationLabels(fakeLocation) - assert.Equal(t, "us-east-1", result["topology-location-region"]) - assert.Equal(t, "ocs-deviceset-gp2-1-data-0-wh5wl", result["topology-location-host"]) - assert.Equal(t, "us-east-1c", result["topology-location-zone"]) -} diff --git a/pkg/operator/ceph/cluster/osd/osd.go b/pkg/operator/ceph/cluster/osd/osd.go deleted file mode 100644 index 50cc160e3..000000000 --- a/pkg/operator/ceph/cluster/osd/osd.go +++ /dev/null @@ -1,754 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package osd for the Ceph OSDs. -package osd - -import ( - "bufio" - "context" - "fmt" - "sort" - "strconv" - "strings" - "time" - - "k8s.io/client-go/kubernetes" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - osdconfig "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-osd") - cephVolumeRawEncryptionModeMinNautilusCephVersion = cephver.CephVersion{Major: 14, Minor: 2, Extra: 11} - cephVolumeRawEncryptionModeMinOctopusCephVersion = cephver.CephVersion{Major: 15, Minor: 2, Extra: 5} -) - -const ( - // AppName is the "app" label on osd pods - AppName = "rook-ceph-osd" - // FailureDomainKey is the label key whose value is the failure domain of the OSD - FailureDomainKey = "failure-domain" - prepareAppName = "rook-ceph-osd-prepare" - prepareAppNameFmt = "rook-ceph-osd-prepare-%s" - osdAppNameFmt = "rook-ceph-osd-%d" - defaultWaitTimeoutForHealthyOSD = 10 * time.Minute - // OsdIdLabelKey is the OSD label key - OsdIdLabelKey = "ceph-osd-id" - serviceAccountName = "rook-ceph-osd" - portableKey = "portable" - cephOsdPodMinimumMemory uint64 = 2048 // minimum amount of memory in MB to run the pod - bluestorePVCMetadata = "metadata" - bluestorePVCWal = "wal" - bluestorePVCData = "data" -) - -// Cluster keeps track of the OSDs -type Cluster struct { - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - rookVersion string - spec cephv1.ClusterSpec - ValidStorage cephv1.StorageScopeSpec // valid subset of `Storage`, computed at runtime - kv *k8sutil.ConfigMapKVStore - deviceSets []deviceSet -} - -// New creates an instance of the OSD manager -func New(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, spec cephv1.ClusterSpec, rookVersion string) *Cluster { - return &Cluster{ - context: context, - clusterInfo: clusterInfo, - spec: spec, - rookVersion: rookVersion, - kv: k8sutil.NewConfigMapKVStore(clusterInfo.Namespace, context.Clientset, clusterInfo.OwnerInfo), - } -} - -// OSDInfo represent all the properties of a given OSD -type OSDInfo struct { - ID int `json:"id"` - Cluster string `json:"cluster"` - UUID string `json:"uuid"` - DevicePartUUID string `json:"device-part-uuid"` - DeviceClass string `json:"device-class"` - // BlockPath is the logical Volume path for an OSD created by Ceph-volume with format '/dev//' or simply /dev/vdb if block mode is used - BlockPath string `json:"lv-path"` - MetadataPath string `json:"metadata-path"` - WalPath 
string `json:"wal-path"` - SkipLVRelease bool `json:"skip-lv-release"` - Location string `json:"location"` - LVBackedPV bool `json:"lv-backed-pv"` - CVMode string `json:"lv-mode"` - Store string `json:"store"` - // Ensure the OSD daemon has affinity with the same topology from the OSD prepare pod - TopologyAffinity string `json:"topologyAffinity"` -} - -// OrchestrationStatus represents the status of an OSD orchestration -type OrchestrationStatus struct { - OSDs []OSDInfo `json:"osds"` - Status string `json:"status"` - PvcBackedOSD bool `json:"pvc-backed-osd"` - Message string `json:"message"` -} - -type osdProperties struct { - //crushHostname refers to the hostname or PVC name when the OSD is provisioned on Nodes or PVC block device, respectively. - crushHostname string - devices []cephv1.Device - pvc corev1.PersistentVolumeClaimVolumeSource - metadataPVC corev1.PersistentVolumeClaimVolumeSource - walPVC corev1.PersistentVolumeClaimVolumeSource - pvcSize string - selection cephv1.Selection - resources corev1.ResourceRequirements - storeConfig osdconfig.StoreConfig - placement cephv1.Placement - preparePlacement *cephv1.Placement - metadataDevice string - portable bool - tuneSlowDeviceClass bool - tuneFastDeviceClass bool - schedulerName string - encrypted bool - deviceSetName string -} - -func (osdProps osdProperties) onPVC() bool { - return osdProps.pvc.ClaimName != "" -} - -func (osdProps osdProperties) onPVCWithMetadata() bool { - return osdProps.metadataPVC.ClaimName != "" -} - -func (osdProps osdProperties) onPVCWithWal() bool { - return osdProps.walPVC.ClaimName != "" -} - -func (osdProps osdProperties) getPreparePlacement() cephv1.Placement { - // If the osd prepare placement is specified, use it - if osdProps.preparePlacement != nil { - return *osdProps.preparePlacement - } - // Fall back to use the same placement as requested for the osd daemons - return osdProps.placement -} - -// Start the osd management -func (c *Cluster) Start() error { - namespace := c.clusterInfo.Namespace - config := c.newProvisionConfig() - errs := newProvisionErrors() - - // Validate pod's memory if specified - for resourceKey, resourceValue := range c.spec.Resources { - if strings.HasPrefix(resourceKey, cephv1.ResourcesKeyOSD) { - err := controller.CheckPodMemory(resourceKey, resourceValue, cephOsdPodMinimumMemory) - if err != nil { - return errors.Wrap(err, "failed to check pod memory") - } - } - } - logger.Infof("start running osds in namespace %q", namespace) - - if !c.spec.Storage.UseAllNodes && len(c.spec.Storage.Nodes) == 0 && len(c.spec.Storage.StorageClassDeviceSets) == 0 { - logger.Warningf("useAllNodes is set to false and no nodes, storageClassDevicesets or volumeSources are specified, no OSD pods are going to be created") - } - - if c.spec.WaitTimeoutForHealthyOSDInMinutes != 0 { - c.clusterInfo.OsdUpgradeTimeout = c.spec.WaitTimeoutForHealthyOSDInMinutes * time.Minute - } else { - c.clusterInfo.OsdUpgradeTimeout = defaultWaitTimeoutForHealthyOSD - } - logger.Infof("wait timeout for healthy OSDs during upgrade or restart is %q", c.clusterInfo.OsdUpgradeTimeout) - - // prepare for updating existing OSDs - updateQueue, deployments, err := c.getOSDUpdateInfo(errs) - if err != nil { - return errors.Wrapf(err, "failed to get information about currently-running OSD Deployments in namespace %q", namespace) - } - logger.Debugf("%d of %d OSD Deployments need updated", updateQueue.Len(), deployments.Len()) - updateConfig := c.newUpdateConfig(config, updateQueue, deployments) - - // prepare for 
creating new OSDs - statusConfigMaps := sets.NewString() - - logger.Info("start provisioning the OSDs on PVCs, if needed") - pvcConfigMaps, err := c.startProvisioningOverPVCs(config, errs) - if err != nil { - return err - } - statusConfigMaps = statusConfigMaps.Union(pvcConfigMaps) - - logger.Info("start provisioning the OSDs on nodes, if needed") - nodeConfigMaps, err := c.startProvisioningOverNodes(config, errs) - if err != nil { - return err - } - statusConfigMaps = statusConfigMaps.Union(nodeConfigMaps) - - createConfig := c.newCreateConfig(config, statusConfigMaps, deployments) - - // do the update and create operations - err = c.updateAndCreateOSDs(createConfig, updateConfig, errs) - if err != nil { - return errors.Wrapf(err, "failed to update/create OSDs") - } - - if errs.len() > 0 { - return errors.Errorf("%d failures encountered while running osds on nodes in namespace %q. %s", - errs.len(), namespace, errs.asMessages()) - } - - // clean up status configmaps that might be dangling from previous reconciles - // for example, if the storage spec changed from or a node failed in a previous failed reconcile - c.deleteAllStatusConfigMaps() - - // The following block is used to apply any command(s) required by an upgrade - c.applyUpgradeOSDFunctionality() - - logger.Infof("finished running OSDs in namespace %q", namespace) - return nil -} - -func (c *Cluster) getExistingOSDDeploymentsOnPVCs() (sets.String, error) { - ctx := context.TODO() - listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s,%s", k8sutil.AppAttr, AppName, OSDOverPVCLabelKey)} - - deployments, err := c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).List(ctx, listOpts) - if err != nil { - return nil, errors.Wrap(err, "failed to query existing OSD deployments") - } - - result := sets.NewString() - for _, deployment := range deployments.Items { - if pvcID, ok := deployment.Labels[OSDOverPVCLabelKey]; ok { - result.Insert(pvcID) - } - } - - return result, nil -} - -func deploymentOnNode(c *Cluster, osd OSDInfo, nodeName string, config *provisionConfig) (*appsv1.Deployment, error) { - osdLongName := fmt.Sprintf("OSD %d on node %q", osd.ID, nodeName) - - osdProps, err := c.getOSDPropsForNode(nodeName, osd.DeviceClass) - if err != nil { - return nil, errors.Wrapf(err, "failed to generate config for %s", osdLongName) - } - - d, err := c.makeDeployment(osdProps, osd, config) - if err != nil { - return nil, errors.Wrapf(err, "failed to generate deployment for %s", osdLongName) - } - - err = setOSDProperties(c, osdProps, osd) - if err != nil { - return nil, errors.Wrapf(err, "failed to prepare deployment for %s", osdLongName) - } - - return d, nil -} - -func deploymentOnPVC(c *Cluster, osd OSDInfo, pvcName string, config *provisionConfig) (*appsv1.Deployment, error) { - osdLongName := fmt.Sprintf("OSD %d on PVC %q", osd.ID, pvcName) - - osdProps, err := c.getOSDPropsForPVC(pvcName, osd.DeviceClass) - if err != nil { - return nil, errors.Wrapf(err, "failed to generate config for %s", osdLongName) - } - - d, err := c.makeDeployment(osdProps, osd, config) - if err != nil { - return nil, errors.Wrapf(err, "failed to generate deployment for %s", osdLongName) - } - - err = setOSDProperties(c, osdProps, osd) - if err != nil { - return nil, errors.Wrapf(err, "failed to prepare deployment for %s", osdLongName) - } - - return d, nil -} - -// setOSDProperties is used to configure an OSD with parameters which can not be set via explicit -// command-line arguments. 
-func setOSDProperties(c *Cluster, osdProps osdProperties, osd OSDInfo) error { - // OSD's 'primary-affinity' has to be configured via command which goes through mons - if osdProps.storeConfig.PrimaryAffinity != "" { - return cephclient.SetPrimaryAffinity(c.context, c.clusterInfo, osd.ID, osdProps.storeConfig.PrimaryAffinity) - } - return nil -} - -func (c *Cluster) resolveNode(nodeName, deviceClass string) *cephv1.Node { - // fully resolve the storage config and resources for this node - rookNode := c.ValidStorage.ResolveNode(nodeName) - if rookNode == nil { - return nil - } - rookNode.Resources = k8sutil.MergeResourceRequirements(rookNode.Resources, cephv1.GetOSDResources(c.spec.Resources, deviceClass)) - - return rookNode -} - -func (c *Cluster) getOSDPropsForNode(nodeName, deviceClass string) (osdProperties, error) { - // fully resolve the storage config and resources for this node - n := c.resolveNode(nodeName, deviceClass) - if n == nil { - return osdProperties{}, errors.Errorf("failed to resolve node %q", nodeName) - } - - storeConfig := osdconfig.ToStoreConfig(n.Config) - metadataDevice := osdconfig.MetadataDevice(n.Config) - osdProps := osdProperties{ - crushHostname: n.Name, - devices: n.Devices, - selection: n.Selection, - resources: n.Resources, - storeConfig: storeConfig, - metadataDevice: metadataDevice, - } - - return osdProps, nil -} - -func (c *Cluster) getOSDPropsForPVC(pvcName, osdDeviceClass string) (osdProperties, error) { - for _, deviceSet := range c.deviceSets { - // The data PVC template is required. - dataSource, dataOK := deviceSet.PVCSources[bluestorePVCData] - if !dataOK { - logger.Warningf("failed to find data source daemon for device set %q, missing the data template", deviceSet.Name) - continue - } - - if pvcName == dataSource.ClaimName { - metadataSource, metadataOK := deviceSet.PVCSources[bluestorePVCMetadata] - if metadataOK { - logger.Infof("OSD will have its main bluestore block on %q and its metadata device on %q", dataSource.ClaimName, metadataSource.ClaimName) - } else { - logger.Infof("OSD will have its main bluestore block on %q", dataSource.ClaimName) - } - - walSource, walOK := deviceSet.PVCSources[bluestorePVCWal] - if walOK { - logger.Infof("OSD will have its wal device on %q", walSource.ClaimName) - } - - if deviceSet.Resources.Limits == nil && deviceSet.Resources.Requests == nil { - deviceSet.Resources = cephv1.GetOSDResources(c.spec.Resources, osdDeviceClass) - } - - osdProps := osdProperties{ - crushHostname: dataSource.ClaimName, - pvc: dataSource, - metadataPVC: metadataSource, - walPVC: walSource, - resources: deviceSet.Resources, - placement: deviceSet.Placement, - preparePlacement: deviceSet.PreparePlacement, - portable: deviceSet.Portable, - tuneSlowDeviceClass: deviceSet.TuneSlowDeviceClass, - tuneFastDeviceClass: deviceSet.TuneFastDeviceClass, - pvcSize: deviceSet.Size, - schedulerName: deviceSet.SchedulerName, - encrypted: deviceSet.Encrypted, - deviceSetName: deviceSet.Name, - } - osdProps.storeConfig.InitialWeight = deviceSet.CrushInitialWeight - osdProps.storeConfig.PrimaryAffinity = deviceSet.CrushPrimaryAffinity - - // If OSD isn't portable, we're getting the host name either from the osd deployment that was already initialized - // or from the osd prepare job from initial creation. 
- if !deviceSet.Portable { - var err error - osdProps.crushHostname, err = c.getPVCHostName(pvcName) - if err != nil { - return osdProperties{}, errors.Wrapf(err, "failed to get crushHostname of non-portable PVC %q", pvcName) - } - } - return osdProps, nil - } - } - return osdProperties{}, errors.Errorf("failed to find valid VolumeSource for PVC %q", pvcName) -} - -// getPVCHostName finds the node where an OSD pod should be assigned with a node selector. -// First look for the node selector that was previously used for the OSD, or if a new OSD -// check for the assignment of the OSD prepare job. -func (c *Cluster) getPVCHostName(pvcName string) (string, error) { - ctx := context.TODO() - listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", OSDOverPVCLabelKey, pvcName)} - - // Check for the existence of the OSD deployment where the node selector was applied - // in a previous reconcile. - deployments, err := c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).List(ctx, listOpts) - if err != nil { - return "", errors.Wrapf(err, "failed to get deployment for osd with pvc %q", pvcName) - } - for _, d := range deployments.Items { - selectors := d.Spec.Template.Spec.NodeSelector - for label, value := range selectors { - if label == corev1.LabelHostname { - return value, nil - } - } - } - - // Since the deployment wasn't found it must be a new deployment so look at the node - // assignment of the OSD prepare pod - pods, err := c.context.Clientset.CoreV1().Pods(c.clusterInfo.Namespace).List(ctx, listOpts) - if err != nil { - return "", errors.Wrapf(err, "failed to get pod for osd with pvc %q", pvcName) - } - for _, pod := range pods.Items { - name, err := k8sutil.GetNodeHostName(c.context.Clientset, pod.Spec.NodeName) - if err != nil { - logger.Warningf("falling back to node name %s since hostname not found for node", pod.Spec.NodeName) - name = pod.Spec.NodeName - } - if name == "" { - return "", errors.Errorf("node name not found on the osd pod %q", pod.Name) - } - return name, nil //nolint // no need for else statement - } - - return "", errors.Errorf("node selector not found on deployment for osd with pvc %q", pvcName) -} - -func getOSDID(d *appsv1.Deployment) (int, error) { - osdID, err := strconv.Atoi(d.Labels[OsdIdLabelKey]) - if err != nil { - // add a question to the user AFTER the error text to help them recover from user error - return -1, errors.Wrapf(err, "failed to parse label \"ceph-osd-id\" on deployment %q. 
did a user modify the deployment and remove the label?", d.Name) - } - return osdID, nil -} - -func (c *Cluster) getOSDInfo(d *appsv1.Deployment) (OSDInfo, error) { - container := d.Spec.Template.Spec.Containers[0] - var osd OSDInfo - - osdID, err := getOSDID(d) - if err != nil { - return OSDInfo{}, err - } - osd.ID = osdID - - isPVC := false - - for _, envVar := range d.Spec.Template.Spec.Containers[0].Env { - if envVar.Name == "ROOK_OSD_UUID" { - osd.UUID = envVar.Value - } - if envVar.Name == "ROOK_PVC_BACKED_OSD" { - isPVC = true - } - if envVar.Name == "ROOK_BLOCK_PATH" || envVar.Name == "ROOK_LV_PATH" { - osd.BlockPath = envVar.Value - } - if envVar.Name == "ROOK_CV_MODE" { - osd.CVMode = envVar.Value - } - if envVar.Name == "ROOK_TOPOLOGY_AFFINITY" { - osd.TopologyAffinity = envVar.Value - } - if envVar.Name == "ROOK_LV_BACKED_PV" { - lvBackedPV, err := strconv.ParseBool(envVar.Value) - if err != nil { - return OSDInfo{}, errors.Wrap(err, "failed to parse ROOK_LV_BACKED_PV") - } - osd.LVBackedPV = lvBackedPV - } - if envVar.Name == osdMetadataDeviceEnvVarName { - osd.MetadataPath = envVar.Value - } - if envVar.Name == osdWalDeviceEnvVarName { - osd.WalPath = envVar.Value - } - if envVar.Name == osdDeviceClassEnvVarName { - osd.DeviceClass = envVar.Value - } - } - - // Needed for upgrade from v1.5 to v1.6. Rook v1.5 did not set ROOK_BLOCK_PATH for OSDs on nodes - // where the 'activate' init container was needed. - if !isPVC && osd.BlockPath == "" { - osd.BlockPath, err = getBlockPathFromActivateInitContainer(d) - if err != nil { - return OSDInfo{}, errors.Wrapf(err, "failed to extract legacy OSD block path from deployment %q", d.Name) - } - } - - // If CVMode is empty, this likely means we upgraded Rook - // This property did not exist before so we need to initialize it - if osd.CVMode == "" { - logger.Infof("required CVMode for OSD %d was not found. assuming this is an LVM OSD", osd.ID) - osd.CVMode = "lvm" - } - - // if the ROOK_TOPOLOGY_AFFINITY env var was not found in the loop above, detect it from the node - if isPVC && osd.TopologyAffinity == "" { - osd.TopologyAffinity, err = getTopologyFromNode(c.context.Clientset, d, osd) - if err != nil { - logger.Errorf("failed to get topology affinity for osd %d. %v", osd.ID, err) - } - } - - locationFound := false - for _, a := range container.Args { - locationPrefix := "--crush-location=" - if strings.HasPrefix(a, locationPrefix) { - locationFound = true - // Extract the same CRUSH location as originally determined by the OSD prepare pod - // by cutting off the prefix: --crush-location= - osd.Location = a[len(locationPrefix):] - } - } - - if !locationFound { - location, _, err := getLocationFromPod(c.context.Clientset, d, cephclient.GetCrushRootFromSpec(&c.spec)) - if err != nil { - logger.Errorf("failed to get location. %v", err) - } else { - osd.Location = location - } - } - - if osd.UUID == "" || osd.BlockPath == "" { - return OSDInfo{}, errors.Errorf("failed to get required osdInfo. 
%+v", osd) - } - - return osd, nil -} - -func osdIsOnPVC(d *appsv1.Deployment) bool { - if _, ok := d.Labels[OSDOverPVCLabelKey]; ok { - return true - } - return false -} - -func getNodeOrPVCName(d *appsv1.Deployment) (string, error) { - if v, ok := d.Labels[OSDOverPVCLabelKey]; ok { - return v, nil // OSD is on PVC - } - for k, v := range d.Spec.Template.Spec.NodeSelector { - if k == corev1.LabelHostname { - return v, nil - } - } - return "", errors.Errorf("failed to find node/PVC name for OSD deployment %q: %+v", d.Name, d) -} - -// Needed for upgrades from v1.5 to v1.6 -func getBlockPathFromActivateInitContainer(d *appsv1.Deployment) (string, error) { - initContainers := d.Spec.Template.Spec.InitContainers - for _, c := range initContainers { - if c.Name != activatePVCOSDInitContainer { - continue - } - if len(c.Command) != 3 { - return "", errors.Errorf("activate init container has fewer command arguments (%d) than expected (3)", len(c.Command)) - } - script := c.Command[2] - varAssignment := "DEVICE=" // this variable assignment is followed by the block path - scanner := bufio.NewScanner(strings.NewReader(script)) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, varAssignment) { - device := strings.TrimPrefix(line, varAssignment) - return device, nil - } - } - if scanner.Err() != nil { - return "", errors.Wrapf(scanner.Err(), "failed to scan through activate init script for variable assignment %q", varAssignment) - } - } - return "", errors.Errorf("failed to find activate init container") -} - -func getLocationFromPod(clientset kubernetes.Interface, d *appsv1.Deployment, crushRoot string) (string, string, error) { - ctx := context.TODO() - pods, err := clientset.CoreV1().Pods(d.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", OsdIdLabelKey, d.Labels[OsdIdLabelKey])}) - if err != nil || len(pods.Items) == 0 { - return "", "", err - } - nodeName := pods.Items[0].Spec.NodeName - hostName, err := k8sutil.GetNodeHostName(clientset, nodeName) - if err != nil { - return "", "", err - } - portable, ok := d.GetLabels()[portableKey] - if ok && portable == "true" { - pvcName, ok := d.GetLabels()[OSDOverPVCLabelKey] - if ok { - hostName = pvcName - } - } - return GetLocationWithNode(clientset, nodeName, crushRoot, hostName) -} - -func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, osd OSDInfo) (string, error) { - portable, ok := d.GetLabels()[portableKey] - if !ok || portable != "true" { - // osd is not portable, no need to load the topology affinity - return "", nil - } - logger.Infof("detecting topology affinity for osd %d after upgrade", osd.ID) - - // Get the osd pod and its assigned node, then look up the node labels - ctx := context.TODO() - pods, err := clientset.CoreV1().Pods(d.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", OsdIdLabelKey, d.Labels[OsdIdLabelKey])}) - if err != nil { - return "", errors.Wrap(err, "failed to get osd pod") - } - if len(pods.Items) == 0 { - return "", errors.New("an osd pod does not exist") - } - nodeName := pods.Items[0].Spec.NodeName - if nodeName == "" { - return "", errors.Errorf("osd %d is not assigned to a node, cannot detect topology affinity", osd.ID) - } - node, err := getNode(clientset, nodeName) - if err != nil { - return "", errors.Wrap(err, "failed to get the node for topology affinity") - } - _, topologyAffinity := ExtractOSDTopologyFromLabels(node.Labels) - logger.Infof("found osd %d topology affinity at %q", osd.ID, 
topologyAffinity) - return topologyAffinity, nil -} - -// GetLocationWithNode gets the topology information about the node. The return values are: -// location: The CRUSH properties for the OSD to apply -// topologyAffinity: The label to be applied to the OSD daemon to guarantee it will start in the same -// topology as the OSD prepare job. -func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushRoot, crushHostname string) (string, string, error) { - node, err := getNode(clientset, nodeName) - if err != nil { - return "", "", errors.Wrap(err, "could not get the node for topology labels") - } - - // If the operator did not pass a host name, look up the hostname label. - // This happens when the operator doesn't know on what node the osd will be assigned (non-portable PVCs). - if crushHostname == "" { - crushHostname, err = k8sutil.GetNodeHostNameLabel(node) - if err != nil { - return "", "", errors.Wrapf(err, "failed to get the host name label for node %q", node.Name) - } - } - - // Start with the host name in the CRUSH map - // Keep the fully qualified host name in the crush map, but replace the dots with dashes to satisfy ceph - hostName := cephclient.NormalizeCrushName(crushHostname) - locArgs := []string{fmt.Sprintf("root=%s", crushRoot), fmt.Sprintf("host=%s", hostName)} - - nodeLabels := node.GetLabels() - topologyAffinity := updateLocationWithNodeLabels(&locArgs, nodeLabels) - - loc := strings.Join(locArgs, " ") - logger.Infof("CRUSH location=%s", loc) - return loc, topologyAffinity, nil -} - -// getNode will try to get the node object for the provided nodeName -// it will try using the node's name it's hostname label -func getNode(clientset kubernetes.Interface, nodeName string) (*corev1.Node, error) { - ctx := context.TODO() - var node *corev1.Node - var err error - // try to find by the node by matching the provided nodeName - node, err = clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) - if kerrors.IsNotFound(err) { - listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%q=%q", corev1.LabelHostname, nodeName)} - nodeList, err := clientset.CoreV1().Nodes().List(ctx, listOpts) - if err != nil || len(nodeList.Items) < 1 { - return nil, errors.Wrapf(err, "could not find node %q hostname label", nodeName) - } - return &nodeList.Items[0], nil - } else if err != nil { - return nil, errors.Wrapf(err, "could not find node %q by name", nodeName) - } - - return node, nil -} - -func updateLocationWithNodeLabels(location *[]string, nodeLabels map[string]string) string { - topology, topologyAffinity := ExtractOSDTopologyFromLabels(nodeLabels) - - keys := make([]string, 0, len(topology)) - for k := range topology { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, topologyType := range keys { - if topologyType != "host" { - cephclient.UpdateCrushMapValue(location, topologyType, topology[topologyType]) - } - } - return topologyAffinity -} - -func (c *Cluster) applyUpgradeOSDFunctionality() { - var osdVersion *cephver.CephVersion - - // Get all the daemons versions - versions, err := cephclient.GetAllCephDaemonVersions(c.context, c.clusterInfo) - if err != nil { - logger.Warningf("failed to get ceph daemons versions; this likely means there are no osds yet. 
%v", err) - return - } - - // If length is one, this clearly indicates that all the osds are running the same version - // If this is the first time we are creating a cluster length will be 0 - // On an initial OSD bootstrap, by the time we reach this code, the OSDs haven't registered yet - // Basically, this task is happening too quickly and OSD pods are not running yet. - // That's not an issue since it's an initial bootstrap and not an update. - if len(versions.Osd) == 1 { - for v := range versions.Osd { - osdVersion, err = cephver.ExtractCephVersion(v) - if err != nil { - logger.Warningf("failed to extract ceph version. %v", err) - return - } - // if the version of these OSDs is Octopus then we run the command - if osdVersion.IsOctopus() { - err = cephclient.EnableReleaseOSDFunctionality(c.context, c.clusterInfo, "octopus") - if err != nil { - logger.Warningf("failed to enable new osd functionality. %v", err) - return - } - } - } - } -} diff --git a/pkg/operator/ceph/cluster/osd/osd_test.go b/pkg/operator/ceph/cluster/osd/osd_test.go deleted file mode 100644 index 9967ae4fd..000000000 --- a/pkg/operator/ceph/cluster/osd/osd_test.go +++ /dev/null @@ -1,636 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "context" - "os" - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - fakeclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephclientfake "github.com/rook/rook/pkg/daemon/ceph/client/fake" - discoverDaemon "github.com/rook/rook/pkg/daemon/discover" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "github.com/tevino/abool" - apps "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes/fake" - k8stesting "k8s.io/client-go/testing" -) - -func TestOSDProperties(t *testing.T) { - osdProps := []osdProperties{ - {pvc: corev1.PersistentVolumeClaimVolumeSource{ClaimName: "claim"}, - metadataPVC: corev1.PersistentVolumeClaimVolumeSource{ClaimName: "claim"}}, - {pvc: corev1.PersistentVolumeClaimVolumeSource{ClaimName: ""}, - metadataPVC: corev1.PersistentVolumeClaimVolumeSource{ClaimName: ""}}, - } - expected := [][2]bool{ - {true, true}, - {false, false}, - } - for i, p := range osdProps { - actual := [2]bool{p.onPVC(), p.onPVCWithMetadata()} - assert.Equal(t, expected[i], actual, "detected a problem in `expected[%d]`", i) - } -} - -func TestStart(t *testing.T) { - clientset := fake.NewSimpleClientset() - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "ns", - CephVersion: cephver.Nautilus, - } - context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}} - spec := cephv1.ClusterSpec{} - c := New(context, clusterInfo, spec, "myversion") - - // Start the first time - err := c.Start() - assert.Nil(t, err) - - // Should not fail if it already exists - err = c.Start() - assert.Nil(t, err) -} - -func createDiscoverConfigmap(nodeName, ns string, clientset *fake.Clientset) error { - ctx := context.TODO() - data := make(map[string]string, 1) - data[discoverDaemon.LocalDiskCMData] = `[{"name":"sdx","parent":"","hasChildren":false,"devLinks":"/dev/disk/by-id/scsi-36001405f826bd553d8c4dbf9f41c18be /dev/disk/by-id/wwn-0x6001405f826bd553d8c4dbf9f41c18be /dev/disk/by-path/ip-127.0.0.1:3260-iscsi-iqn.2016-06.world.srv:storage.target01-lun-1","size":10737418240,"uuid":"","serial":"36001405f826bd553d8c4dbf9f41c18be","type":"disk","rotational":true,"readOnly":false,"ownPartition":true,"filesystem":"","vendor":"LIO-ORG","model":"disk02","wwn":"0x6001405f826bd553","wwnVendorExtension":"0x6001405f826bd553d8c4dbf9f41c18be","empty":true}]` - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "local-device-" + nodeName, - Namespace: ns, - Labels: map[string]string{ - k8sutil.AppAttr: discoverDaemon.AppName, - discoverDaemon.NodeAttr: nodeName, - }, - }, - Data: data, - } - _, err := clientset.CoreV1().ConfigMaps(ns).Create(ctx, cm, metav1.CreateOptions{}) - return err -} - -func createNode(nodeName string, condition corev1.NodeConditionType, clientset *fake.Clientset) error { - ctx := context.TODO() - node := &corev1.Node{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - Status: corev1.NodeStatus{ - Conditions: []corev1.NodeCondition{ - { - Type: condition, Status: corev1.ConditionTrue, - }, - }, - }, - } - _, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) - return err -} - -func TestAddRemoveNode(t *testing.T) { - ctx := context.TODO() - namespace := "ns-add-remove" - // create a storage spec with the given nodes/devices/dirs - nodeName := "node23" - - oldConditionExportFunc := updateConditionFunc - defer func() { - updateConditionFunc = oldConditionExportFunc - }() - // stub out the conditionExportFunc to do nothing. we do not have a fake Rook interface that - // allows us to interact with a CephCluster resource like the fake K8s clientset. - updateConditionFunc = func(c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) { - // do nothing - } - - // set up a fake k8s client set and watcher to generate events that the operator will listen to - clientset := fake.NewSimpleClientset() - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - test.AddReadyNode(t, clientset, nodeName, "23.23.23.23") - cmErr := createDiscoverConfigmap(nodeName, "rook-system", clientset) - assert.Nil(t, cmErr) - - statusMapWatcher := watch.NewFake() - clientset.PrependWatchReactor("configmaps", k8stesting.DefaultWatchReactor(statusMapWatcher, nil)) - - clusterInfo := &cephclient.ClusterInfo{ - Namespace: namespace, - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("rook-ceph-test") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - generateKey := "expected key" - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - return "{\"key\": \"" + generateKey + "\"}", nil - }, - } - - context := &clusterd.Context{ - Clientset: clientset, - ConfigDir: "/var/lib/rook", - Executor: executor, - RequestCancelOrchestration: abool.New(), - RookClientset: fakeclient.NewSimpleClientset(), - } - spec := cephv1.ClusterSpec{ - DataDirHostPath: context.ConfigDir, - Storage: cephv1.StorageScopeSpec{ - Nodes: []cephv1.Node{ - { - Name: nodeName, - Selection: cephv1.Selection{ - Devices: []cephv1.Device{{Name: "sdx"}}, - }, - }, - }, - }, - } - c := New(context, clusterInfo, spec, "myversion") - - // kick off the start of the orchestration in a goroutine - var startErr error - startCompleted := false - go func() { - startErr = c.Start() - startCompleted = true - }() - - mockNodeOrchestrationCompletion(c, nodeName, statusMapWatcher) - waitForOrchestrationCompletion(c, nodeName, &startCompleted) - - // verify orchestration for adding the node succeeded - assert.True(t, startCompleted) - assert.NoError(t, startErr) - _, err := clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName(1), metav1.GetOptions{}) - assert.NoError(t, err) - - // simulate the OSD pod having been created - osdPod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ - Name: "osdPod", - Labels: map[string]string{k8sutil.AppAttr: AppName}}} - _, err = c.context.Clientset.CoreV1().Pods(c.clusterInfo.Namespace).Create(ctx, osdPod, metav1.CreateOptions{}) - assert.NoError(t, err) - - // mock the ceph calls that will be called during remove node - context.Executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: 
%s %v", command, args) - if args[0] == "status" { - return `{"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "osd" { - if args[1] == "df" { - return `{"nodes":[{"id":1,"name":"osd.1","kb_used":0}]}`, nil - } - if args[1] == "dump" { - // OSD 1 is down and out - return `{"OSDs": [{"OSD": 1, "Up": 0, "In": 0}]}`, nil - } - if args[1] == "safe-to-destroy" { - return `{"safe_to_destroy":[1],"active":[],"missing_stats":[],"stored_pgs":[]}`, nil - } - if args[1] == "set" { - return "", nil - } - if args[1] == "unset" { - return "", nil - } - if args[1] == "crush" { - if args[2] == "reweight" { - return "", nil - } - if args[2] == "rm" { - return "", nil - } - if args[2] == "get-device-class" { - return cephclientfake.OSDDeviceClassOutput(args[3]), nil - } - } - if args[1] == "out" { - return "", nil - } - if args[1] == "rm" { - assert.Equal(t, "1", args[2]) - return "", nil - } - if args[1] == "find" { - return `{"crush_location":{"host":"my-host"}}`, nil - } - if args[1] == "ok-to-stop" { - return cephclientfake.OsdOkToStopOutput(1, []int{1}, true), nil - } - } - if args[0] == "df" && args[1] == "detail" { - return `{"stats":{"total_bytes":0,"total_used_bytes":0,"total_avail_bytes":3072}}`, nil - } - if args[0] == "pg" && args[1] == "dump" { - return `{}`, nil - } - if args[0] == "auth" && args[1] == "del" { - assert.Equal(t, "osd.1", args[2]) - return "", nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - }, - } - - // modify the storage spec to remove the node from the cluster - spec.Storage.Nodes = []cephv1.Node{} - c = New(context, clusterInfo, spec, "myversion") - - // reset the orchestration status watcher - statusMapWatcher = watch.NewFake() - clientset.PrependWatchReactor("configmaps", k8stesting.DefaultWatchReactor(statusMapWatcher, nil)) - - startErr = nil - startCompleted = false - go func() { - startErr = c.Start() - startCompleted = true - }() - - mockNodeOrchestrationCompletion(c, nodeName, statusMapWatcher) - waitForOrchestrationCompletion(c, nodeName, &startCompleted) - - // verify orchestration for removing the node succeeded - assert.True(t, startCompleted) - assert.NoError(t, startErr) - // deployment should still exist; OSDs are removed by health monitor code only if they are down, - // out, and the user has set removeOSDsIfOutAndSafeToRemove - _, err = clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName(1), metav1.GetOptions{}) - assert.NoError(t, err) - - removeIfOutAndSafeToRemove := true - healthMon := NewOSDHealthMonitor(context, cephclient.AdminClusterInfo(namespace), removeIfOutAndSafeToRemove, cephv1.CephClusterHealthCheckSpec{}) - healthMon.checkOSDHealth() - _, err = clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName(1), metav1.GetOptions{}) - assert.True(t, k8serrors.IsNotFound(err)) -} - -func TestAddNodeFailure(t *testing.T) { - // create a storage spec with the given nodes/devices/dirs - nodeName := "node1672" - - // create a fake clientset that will return an error when the operator tries to create a job - clientset := fake.NewSimpleClientset() - clientset.PrependReactor("create", "jobs", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.New("mock failed to create jobs") - }) - nodeErr := createNode(nodeName, corev1.NodeReady, clientset) - assert.Nil(t, nodeErr) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - cmErr := 
createDiscoverConfigmap(nodeName, "rook-system", clientset) - assert.Nil(t, cmErr) - - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "ns-add-remove", - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("testcluster") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}, RequestCancelOrchestration: abool.New()} - spec := cephv1.ClusterSpec{ - DataDirHostPath: context.ConfigDir, - Storage: cephv1.StorageScopeSpec{ - Nodes: []cephv1.Node{ - { - Name: nodeName, - Selection: cephv1.Selection{ - Devices: []cephv1.Device{{Name: "sdx"}}, - }, - }, - }, - }, - } - c := New(context, clusterInfo, spec, "myversion") - - // kick off the start of the orchestration in a goroutine - var startErr error - startCompleted := false - go func() { - startErr = c.Start() - startCompleted = true - }() - - // wait for orchestration to complete - waitForOrchestrationCompletion(c, nodeName, &startCompleted) - - // verify orchestration failed (because the operator failed to create a job) - assert.True(t, startCompleted) - assert.NotNil(t, startErr) -} - -func TestGetPVCHostName(t *testing.T) { - ctx := context.TODO() - clientset := fake.NewSimpleClientset() - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns"} - clusterInfo.SetName("mycluster") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - c := &Cluster{context: &clusterd.Context{Clientset: clientset}, clusterInfo: clusterInfo} - osdInfo := OSDInfo{ID: 23} - pvcName := "test-pvc" - - // fail to get the host name when there is no pod or deployment - name, err := c.getPVCHostName(pvcName) - assert.Error(t, err) - assert.Equal(t, "", name) - - // Create a sample osd deployment - osdDeployment := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "osd-23", - Namespace: c.clusterInfo.Namespace, - Labels: c.getOSDLabels(osdInfo, "", true), - }, - } - k8sutil.AddLabelToDeployment(OSDOverPVCLabelKey, pvcName, osdDeployment) - osdDeployment.Spec.Template.Spec.NodeSelector = map[string]string{corev1.LabelHostname: "testnode"} - - _, err = clientset.AppsV1().Deployments(c.clusterInfo.Namespace).Create(ctx, osdDeployment, metav1.CreateOptions{}) - assert.NoError(t, err) - - // get the host name based on the deployment - name, err = c.getPVCHostName(pvcName) - assert.NoError(t, err) - assert.Equal(t, "testnode", name) - - // delete the deployment and get the host name based on the pod - err = clientset.AppsV1().Deployments(c.clusterInfo.Namespace).Delete(ctx, osdDeployment.Name, metav1.DeleteOptions{}) - assert.NoError(t, err) - osdPod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "osd-23", - Namespace: c.clusterInfo.Namespace, - Labels: c.getOSDLabels(osdInfo, "", true), - }, - } - osdPod.Labels = map[string]string{OSDOverPVCLabelKey: pvcName} - osdPod.Spec.NodeName = "testnode" - _, err = clientset.CoreV1().Pods(c.clusterInfo.Namespace).Create(ctx, osdPod, metav1.CreateOptions{}) - assert.NoError(t, err) - - name, err = c.getPVCHostName(pvcName) - assert.NoError(t, err) - assert.Equal(t, "testnode", name) -} - -func TestGetOSDInfo(t *testing.T) { - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns"} - clusterInfo.SetName("test") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - context := &clusterd.Context{} - spec := cephv1.ClusterSpec{DataDirHostPath: "/rook"} - c := New(context, clusterInfo, spec, "myversion") - - node := "n1" - location := "root=default host=myhost zone=myzone" - osd1 
:= OSDInfo{ID: 3, UUID: "osd-uuid", BlockPath: "dev/logical-volume-path", CVMode: "raw", Location: location, TopologyAffinity: "topology.rook.io/rack=rack0"} - osd2 := OSDInfo{ID: 3, UUID: "osd-uuid", BlockPath: "vg1/lv1", CVMode: "lvm", LVBackedPV: true} - osd3 := OSDInfo{ID: 3, UUID: "osd-uuid", BlockPath: "", CVMode: "raw"} - osdProp := osdProperties{ - crushHostname: node, - pvc: corev1.PersistentVolumeClaimVolumeSource{ClaimName: "pvc"}, - selection: cephv1.Selection{}, - resources: corev1.ResourceRequirements{}, - storeConfig: config.StoreConfig{}, - portable: true, - } - dataPathMap := &provisionConfig{ - DataPathMap: opconfig.NewDatalessDaemonDataPathMap(c.clusterInfo.Namespace, c.spec.DataDirHostPath), - } - - t.Run("get info from PVC-based OSDs", func(t *testing.T) { - d1, _ := c.makeDeployment(osdProp, osd1, dataPathMap) - osdInfo1, _ := c.getOSDInfo(d1) - assert.Equal(t, osd1.ID, osdInfo1.ID) - assert.Equal(t, osd1.BlockPath, osdInfo1.BlockPath) - assert.Equal(t, osd1.CVMode, osdInfo1.CVMode) - assert.Equal(t, location, osdInfo1.Location) - assert.Equal(t, osd1.TopologyAffinity, osdInfo1.TopologyAffinity) - osdProp.portable = false - - d2, _ := c.makeDeployment(osdProp, osd2, dataPathMap) - osdInfo2, _ := c.getOSDInfo(d2) - assert.Equal(t, osd2.ID, osdInfo2.ID) - assert.Equal(t, osd2.BlockPath, osdInfo2.BlockPath) - assert.Equal(t, osd2.CVMode, osdInfo2.CVMode) - assert.Equal(t, osd2.LVBackedPV, osdInfo2.LVBackedPV) - - // make deployment fails if block path is empty. allow it to create a valid deployment, then - // set the deployment to have bad info - d3, err := c.makeDeployment(osdProp, osd3, dataPathMap) - assert.NoError(t, err) - d3.Spec.Template.Spec.Containers[0].Env = append(d3.Spec.Template.Spec.Containers[0].Env, - corev1.EnvVar{Name: blockPathVarName, Value: ""}) - _, err = c.getOSDInfo(d3) - assert.Error(t, err) - }) - - t.Run("get info from node-based OSDs", func(t *testing.T) { - useAllDevices := true - osd4 := OSDInfo{ID: 3, UUID: "osd-uuid", BlockPath: "", CVMode: "lvm", Location: location} - osd5 := OSDInfo{ID: 3, UUID: "osd-uuid", BlockPath: "vg1/lv1", CVMode: "lvm"} - osdProp = osdProperties{ - crushHostname: node, - devices: []cephv1.Device{}, - pvc: corev1.PersistentVolumeClaimVolumeSource{}, - selection: cephv1.Selection{ - UseAllDevices: &useAllDevices, - }, - resources: corev1.ResourceRequirements{}, - storeConfig: config.StoreConfig{}, - metadataDevice: "", - } - - d4, _ := c.makeDeployment(osdProp, osd4, dataPathMap) - osdInfo4, _ := c.getOSDInfo(d4) - assert.Equal(t, osd4.ID, osdInfo4.ID) - assert.Equal(t, location, osdInfo4.Location) - - d5, _ := c.makeDeployment(osdProp, osd5, dataPathMap) - osdInfo5, _ := c.getOSDInfo(d5) - assert.Equal(t, osd5.ID, osdInfo5.ID) - assert.Equal(t, osd5.CVMode, osdInfo5.CVMode) - }) -} - -func TestGetPreparePlacement(t *testing.T) { - // no placement - prop := osdProperties{} - result := prop.getPreparePlacement() - assert.Nil(t, result.NodeAffinity) - - // the osd daemon placement is specified - prop.placement = cephv1.Placement{NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "label1", - Operator: corev1.NodeSelectorOpIn, - Values: []string{"bar", "baz"}, - }, - }, - }, - }, - }, - }, - } - - result = prop.getPreparePlacement() - assert.Equal(t, 1, len(result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms)) - 
assert.Equal(t, "label1", result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key) - - // The prepare placement is specified and takes precedence over the osd placement - prop.preparePlacement = &cephv1.Placement{NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "label2", - Operator: corev1.NodeSelectorOpIn, - Values: []string{"foo", "bar"}, - }, - }, - }, - }, - }, - }, - } - result = prop.getPreparePlacement() - assert.Equal(t, 1, len(result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms)) - assert.Equal(t, "label2", result.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key) -} - -func TestDetectCrushLocation(t *testing.T) { - location := []string{"host=foo"} - nodeLabels := map[string]string{} - - // no change to the location if there are no labels - updateLocationWithNodeLabels(&location, nodeLabels) - assert.Equal(t, 1, len(location)) - assert.Equal(t, "host=foo", location[0]) - - // no change to the location if an invalid label or invalid topology - nodeLabels = map[string]string{ - "topology.rook.io/foo": "bar", - "invalid.topology.rook.io/rack": "r1", - "topology.rook.io/zone": "z1", - } - updateLocationWithNodeLabels(&location, nodeLabels) - assert.Equal(t, 1, len(location)) - assert.Equal(t, "host=foo", location[0]) - - // update the location with valid topology labels - nodeLabels = map[string]string{ - "failure-domain.beta.kubernetes.io/region": "region1", - "failure-domain.beta.kubernetes.io/zone": "zone1", - "topology.rook.io/rack": "rack1", - "topology.rook.io/row": "row1", - } - - expected := []string{ - "host=foo", - "rack=rack1", - "region=region1", - "row=row1", - "zone=zone1", - } - updateLocationWithNodeLabels(&location, nodeLabels) - - assert.Equal(t, 5, len(location)) - for i, locString := range location { - assert.Equal(t, locString, expected[i]) - } -} - -func TestGetOSDInfoWithCustomRoot(t *testing.T) { - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns"} - clusterInfo.SetName("test") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - context := &clusterd.Context{} - spec := cephv1.ClusterSpec{ - DataDirHostPath: "/rook", - Storage: cephv1.StorageScopeSpec{ - Config: map[string]string{ - "crushRoot": "custom-root", - }, - }, - } - c := New(context, clusterInfo, spec, "myversion") - - node := "n1" - location := "root=custom-root host=myhost zone=myzone" - osd1 := OSDInfo{ID: 3, UUID: "osd-uuid", BlockPath: "dev/logical-volume-path", CVMode: "raw", Location: location} - osd2 := OSDInfo{ID: 3, UUID: "osd-uuid", BlockPath: "vg1/lv1", CVMode: "lvm", LVBackedPV: true, Location: location} - osd3 := OSDInfo{ID: 3, UUID: "osd-uuid", BlockPath: "", CVMode: "lvm", Location: location} - osdProp := osdProperties{ - crushHostname: node, - pvc: corev1.PersistentVolumeClaimVolumeSource{ClaimName: "pvc"}, - selection: cephv1.Selection{}, - resources: corev1.ResourceRequirements{}, - storeConfig: config.StoreConfig{}, - } - dataPathMap := &provisionConfig{ - DataPathMap: opconfig.NewDatalessDaemonDataPathMap(c.clusterInfo.Namespace, c.spec.DataDirHostPath), - } - d1, _ := c.makeDeployment(osdProp, osd1, dataPathMap) - osdInfo1, _ := c.getOSDInfo(d1) - assert.Equal(t, osd1.ID, osdInfo1.ID) - assert.Equal(t, osd1.BlockPath, osdInfo1.BlockPath) - 
assert.Equal(t, osd1.CVMode, osdInfo1.CVMode) - assert.Equal(t, location, osdInfo1.Location) - - d2, _ := c.makeDeployment(osdProp, osd2, dataPathMap) - osdInfo2, _ := c.getOSDInfo(d2) - assert.Equal(t, osd2.ID, osdInfo2.ID) - assert.Equal(t, osd2.BlockPath, osdInfo2.BlockPath) - assert.Equal(t, osd2.CVMode, osdInfo2.CVMode) - assert.Equal(t, osd2.LVBackedPV, osdInfo2.LVBackedPV) - assert.Equal(t, location, osdInfo2.Location) - - d3, _ := c.makeDeployment(osdProp, osd3, dataPathMap) - _, err := c.getOSDInfo(d3) - assert.Error(t, err) -} diff --git a/pkg/operator/ceph/cluster/osd/provision_spec.go b/pkg/operator/ceph/cluster/osd/provision_spec.go deleted file mode 100644 index 396c1adfb..000000000 --- a/pkg/operator/ceph/cluster/osd/provision_spec.go +++ /dev/null @@ -1,318 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "encoding/json" - "fmt" - "path" - - "github.com/libopenstorage/secrets" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - kms "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - batch "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (c *Cluster) makeJob(osdProps osdProperties, provisionConfig *provisionConfig) (*batch.Job, error) { - podSpec, err := c.provisionPodTemplateSpec(osdProps, v1.RestartPolicyOnFailure, provisionConfig) - if err != nil { - return nil, err - } - - if !osdProps.onPVC() { - podSpec.Spec.NodeSelector = map[string]string{v1.LabelHostname: osdProps.crushHostname} - } else { - // This is not needed in raw mode and 14.2.8 brings it - // but we still want to do this not to lose backward compatibility with lvm based OSDs... 
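		// In practice the prepare pod therefore always gets the PVC "bridge" init
		// containers (plus the metadata and wal variants below when those PVCs
		// exist), so the block device shows up at a predictable path inside the
		// pod for both lvm- and raw-mode provisioning.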
- podSpec.Spec.InitContainers = append(podSpec.Spec.InitContainers, c.getPVCInitContainer(osdProps)) - if osdProps.onPVCWithMetadata() { - podSpec.Spec.InitContainers = append(podSpec.Spec.InitContainers, c.getPVCMetadataInitContainer("/srv", osdProps)) - } - if osdProps.onPVCWithWal() { - podSpec.Spec.InitContainers = append(podSpec.Spec.InitContainers, c.getPVCWalInitContainer("/wal", osdProps)) - } - } - - job := &batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: k8sutil.TruncateNodeName(prepareAppNameFmt, osdProps.crushHostname), - Namespace: c.clusterInfo.Namespace, - Labels: map[string]string{ - k8sutil.AppAttr: prepareAppName, - k8sutil.ClusterAttr: c.clusterInfo.Namespace, - }, - }, - Spec: batch.JobSpec{ - Template: *podSpec, - }, - } - - if osdProps.onPVC() { - k8sutil.AddLabelToJob(OSDOverPVCLabelKey, osdProps.pvc.ClaimName, job) - k8sutil.AddLabelToJob(CephDeviceSetLabelKey, osdProps.deviceSetName, job) - k8sutil.AddLabelToPod(OSDOverPVCLabelKey, osdProps.pvc.ClaimName, &job.Spec.Template) - k8sutil.AddLabelToPod(CephDeviceSetLabelKey, osdProps.deviceSetName, &job.Spec.Template) - } - - k8sutil.AddRookVersionLabelToJob(job) - controller.AddCephVersionLabelToJob(c.clusterInfo.CephVersion, job) - err = c.clusterInfo.OwnerInfo.SetControllerReference(job) - if err != nil { - return nil, err - } - - // override the resources of all the init containers and main container with the expected osd prepare resources - c.applyResourcesToAllContainers(&podSpec.Spec, cephv1.GetPrepareOSDResources(c.spec.Resources)) - return job, nil -} - -// applyResourcesToAllContainers applies consistent resource requests for all containers and all init containers in the pod -func (c *Cluster) applyResourcesToAllContainers(spec *v1.PodSpec, resources v1.ResourceRequirements) { - for i := range spec.InitContainers { - spec.InitContainers[i].Resources = resources - } - for i := range spec.Containers { - spec.Containers[i].Resources = resources - } -} - -func (c *Cluster) provisionPodTemplateSpec(osdProps osdProperties, restart v1.RestartPolicy, provisionConfig *provisionConfig) (*v1.PodTemplateSpec, error) { - copyBinariesVolume, copyBinariesContainer := c.getCopyBinariesContainer() - - // ceph-volume is currently set up to use /etc/ceph/ceph.conf; this means no user config - // overrides will apply to ceph-volume, but this is unnecessary anyway - volumes := append(controller.PodVolumes(provisionConfig.DataPathMap, c.spec.DataDirHostPath, true), copyBinariesVolume) - - // create a volume on /dev so the pod can access devices on the host - devVolume := v1.Volume{Name: "devices", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev"}}} - volumes = append(volumes, devVolume) - udevVolume := v1.Volume{Name: "udev", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/run/udev"}}} - volumes = append(volumes, udevVolume) - - // If not running on PVC we mount the rootfs of the host to validate the presence of the LVM package - if !osdProps.onPVC() { - rootFSVolume := v1.Volume{Name: "rootfs", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/"}}} - volumes = append(volumes, rootFSVolume) - } - - if osdProps.onPVC() { - // Create volume config for PVCs - volumes = append(volumes, getPVCOSDVolumes(&osdProps, c.spec.DataDirHostPath, c.clusterInfo.Namespace, true)...) 
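		// When the OSD is encrypted and the key encryption key (KEK) is kept in
		// Vault, the prepare pod also mounts the Vault volume (TLS material) so
		// it can reach Vault and fetch the KEK; that volume is appended below.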
- if osdProps.encrypted { - // If a KMS is configured we populate - if c.spec.Security.KeyManagementService.IsEnabled() { - kmsProvider := kms.GetParam(c.spec.Security.KeyManagementService.ConnectionDetails, kms.Provider) - if kmsProvider == secrets.TypeVault { - volumeTLS, _ := kms.VaultVolumeAndMount(c.spec.Security.KeyManagementService.ConnectionDetails) - volumes = append(volumes, volumeTLS) - } - } - } - } - - if len(volumes) == 0 { - return nil, errors.New("empty volumes") - } - - provisionContainer, err := c.provisionOSDContainer(osdProps, copyBinariesContainer.VolumeMounts[0], provisionConfig) - if err != nil { - return nil, errors.Wrap(err, "failed to generate OSD provisioning container") - } - - podSpec := v1.PodSpec{ - ServiceAccountName: serviceAccountName, - InitContainers: []v1.Container{ - *copyBinariesContainer, - }, - Containers: []v1.Container{ - provisionContainer, - }, - RestartPolicy: restart, - Volumes: volumes, - HostNetwork: c.spec.Network.IsHost(), - PriorityClassName: cephv1.GetOSDPriorityClassName(c.spec.PriorityClassNames), - SchedulerName: osdProps.schedulerName, - } - if c.spec.Network.IsHost() { - podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } - if osdProps.onPVC() { - c.applyAllPlacementIfNeeded(&podSpec) - // apply storageClassDeviceSets.preparePlacement - osdProps.getPreparePlacement().ApplyToPodSpec(&podSpec) - } else { - c.applyAllPlacementIfNeeded(&podSpec) - // apply spec.placement.prepareosd - c.spec.Placement[cephv1.KeyOSDPrepare].ApplyToPodSpec(&podSpec) - } - - k8sutil.RemoveDuplicateEnvVars(&podSpec) - - podMeta := metav1.ObjectMeta{ - Name: AppName, - Labels: map[string]string{ - k8sutil.AppAttr: prepareAppName, - k8sutil.ClusterAttr: c.clusterInfo.Namespace, - OSDOverPVCLabelKey: osdProps.pvc.ClaimName, - }, - Annotations: map[string]string{}, - } - - cephv1.GetOSDPrepareAnnotations(c.spec.Annotations).ApplyToObjectMeta(&podMeta) - cephv1.GetOSDPrepareLabels(c.spec.Labels).ApplyToObjectMeta(&podMeta) - - // ceph-volume --dmcrypt uses cryptsetup that synchronizes with udev on - // host through semaphore - podSpec.HostIPC = osdProps.storeConfig.EncryptedDevice || osdProps.encrypted - - return &v1.PodTemplateSpec{ - ObjectMeta: podMeta, - Spec: podSpec, - }, nil -} - -func (c *Cluster) provisionOSDContainer(osdProps osdProperties, copyBinariesMount v1.VolumeMount, provisionConfig *provisionConfig) (v1.Container, error) { - envVars := c.getConfigEnvVars(osdProps, k8sutil.DataDir) - - // enable debug logging in the prepare job - envVars = append(envVars, setDebugLogLevelEnvVar(true)) - - // only 1 of device list, device filter, device path filter and use all devices can be specified. We prioritize in that order. 
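// The precedence that follows is: explicit device list, then DeviceFilter,
// then DevicePathFilter, then UseAllDevices. A minimal, self-contained sketch
// of that ordering is shown here for illustration only; the helper name
// deviceSelectionEnv, its parameters, and the plain string device list are
// hypothetical simplifications (the real code below marshals ConfiguredDevice
// structs and sets the environment through the *EnvVar helper functions).
package main

import (
	"encoding/json"
	"fmt"
)

// deviceSelectionEnv mirrors the branch order used by the prepare job:
// an explicit device list wins, then a device name filter, then a device
// path filter, and finally the catch-all "use all devices" setting.
func deviceSelectionEnv(devices []string, deviceFilter, devicePathFilter string, useAllDevices bool) (key, value string) {
	switch {
	case len(devices) > 0:
		b, _ := json.Marshal(devices) // simplified; not the real ConfiguredDevice payload
		return "DATA_DEVICES", string(b)
	case deviceFilter != "":
		return "DATA_DEVICE_FILTER", deviceFilter
	case devicePathFilter != "":
		return "DATA_DEVICE_PATH_FILTER", devicePathFilter
	case useAllDevices:
		return "DATA_DEVICE_FILTER", "all"
	default:
		return "", ""
	}
}

func main() {
	// Even with a filter and useAllDevices set, the explicit list takes priority.
	fmt.Println(deviceSelectionEnv([]string{"sdx"}, "^sd.", "", true))
}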
- if len(osdProps.devices) > 0 { - configuredDevices := []config.ConfiguredDevice{} - for _, device := range osdProps.devices { - id := device.Name - if device.FullPath != "" { - id = device.FullPath - } - cd := config.ConfiguredDevice{ - ID: id, - StoreConfig: config.ToStoreConfig(device.Config), - } - configuredDevices = append(configuredDevices, cd) - } - marshalledDevices, err := json.Marshal(configuredDevices) - if err != nil { - return v1.Container{}, errors.Wrapf(err, "failed to JSON marshal configured devices for node %q", osdProps.crushHostname) - } - envVars = append(envVars, dataDevicesEnvVar(string(marshalledDevices))) - } else if osdProps.selection.DeviceFilter != "" { - envVars = append(envVars, deviceFilterEnvVar(osdProps.selection.DeviceFilter)) - } else if osdProps.selection.DevicePathFilter != "" { - envVars = append(envVars, devicePathFilterEnvVar(osdProps.selection.DevicePathFilter)) - } else if osdProps.selection.GetUseAllDevices() { - envVars = append(envVars, deviceFilterEnvVar("all")) - } - envVars = append(envVars, v1.EnvVar{Name: "ROOK_CEPH_VERSION", Value: c.clusterInfo.CephVersion.CephVersionFormatted()}) - envVars = append(envVars, crushDeviceClassEnvVar(osdProps.storeConfig.DeviceClass)) - envVars = append(envVars, crushInitialWeightEnvVar(osdProps.storeConfig.InitialWeight)) - - if osdProps.metadataDevice != "" { - envVars = append(envVars, metadataDeviceEnvVar(osdProps.metadataDevice)) - } - - volumeMounts := append(controller.CephVolumeMounts(provisionConfig.DataPathMap, true), []v1.VolumeMount{ - {Name: "devices", MountPath: "/dev"}, - {Name: "udev", MountPath: "/run/udev"}, - copyBinariesMount, - }...) - - // If not running on PVC we mount the rootfs of the host to validate the presence of the LVM package - if !osdProps.onPVC() { - volumeMounts = append(volumeMounts, v1.VolumeMount{Name: "rootfs", MountPath: "/rootfs", ReadOnly: true}) - } - - // If the OSD runs on PVC - if osdProps.onPVC() { - volumeMounts = append(volumeMounts, getPvcOSDBridgeMount(osdProps.pvc.ClaimName)) - // The device list is read by the Rook CLI via environment variables so let's add them - configuredDevices := []config.ConfiguredDevice{ - { - ID: fmt.Sprintf("/mnt/%s", osdProps.pvc.ClaimName), - StoreConfig: config.NewStoreConfig(), - }, - } - if osdProps.onPVCWithMetadata() { - volumeMounts = append(volumeMounts, getPvcMetadataOSDBridgeMount(osdProps.metadataPVC.ClaimName)) - configuredDevices = append(configuredDevices, - config.ConfiguredDevice{ - ID: fmt.Sprintf("/srv/%s", osdProps.metadataPVC.ClaimName), - StoreConfig: config.NewStoreConfig(), - }) - } - if osdProps.onPVCWithWal() { - volumeMounts = append(volumeMounts, getPvcWalOSDBridgeMount(osdProps.walPVC.ClaimName)) - configuredDevices = append(configuredDevices, - config.ConfiguredDevice{ - ID: fmt.Sprintf("/wal/%s", osdProps.walPVC.ClaimName), - StoreConfig: config.NewStoreConfig(), - }) - } - marshalledDevices, err := json.Marshal(configuredDevices) - if err != nil { - return v1.Container{}, errors.Wrapf(err, "failed to JSON marshal configured devices for PVC %q", osdProps.crushHostname) - } - envVars = append(envVars, dataDevicesEnvVar(string(marshalledDevices))) - envVars = append(envVars, pvcBackedOSDEnvVar("true")) - envVars = append(envVars, encryptedDeviceEnvVar(osdProps.encrypted)) - envVars = append(envVars, pvcNameEnvVar(osdProps.pvc.ClaimName)) - - if osdProps.encrypted { - // If a KMS is configured we populate - if c.spec.Security.KeyManagementService.IsEnabled() { - kmsProvider := 
kms.GetParam(c.spec.Security.KeyManagementService.ConnectionDetails, kms.Provider) - if kmsProvider == secrets.TypeVault { - _, volumeMountsTLS := kms.VaultVolumeAndMount(c.spec.Security.KeyManagementService.ConnectionDetails) - volumeMounts = append(volumeMounts, volumeMountsTLS) - envVars = append(envVars, kms.VaultConfigToEnvVar(c.spec)...) - } - } else { - envVars = append(envVars, cephVolumeRawEncryptedEnvVarFromSecret(osdProps)) - } - } - } - - // run privileged always since we always mount /dev - privileged := true - runAsUser := int64(0) - runAsNonRoot := false - readOnlyRootFilesystem := false - - osdProvisionContainer := v1.Container{ - Command: []string{path.Join(rookBinariesMountPath, "tini")}, - Args: []string{"--", path.Join(rookBinariesMountPath, "rook"), "ceph", "osd", "provision"}, - Name: "provision", - Image: c.spec.CephVersion.Image, - VolumeMounts: volumeMounts, - Env: envVars, - SecurityContext: &v1.SecurityContext{ - Privileged: &privileged, - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, - }, - Resources: cephv1.GetPrepareOSDResources(c.spec.Resources), - } - - return osdProvisionContainer, nil -} diff --git a/pkg/operator/ceph/cluster/osd/spec.go b/pkg/operator/ceph/cluster/osd/spec.go deleted file mode 100644 index 161f6729f..000000000 --- a/pkg/operator/ceph/cluster/osd/spec.go +++ /dev/null @@ -1,1264 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package osd for the Ceph OSDs. 
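// This file (spec.go) assembles the OSD daemon Deployment: the init containers
// for device mapping, encryption open/expand, and activation, together with the
// embedded shell used to activate OSDs and open encrypted block devices.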
-package osd - -import ( - "fmt" - "path" - "path/filepath" - "strconv" - - "github.com/libopenstorage/secrets" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - kms "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - cephkey "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - rookBinariesMountPath = "/rook" - rookBinariesVolumeName = "rook-binaries" - activateOSDVolumeName = "activate-osd" - activateOSDMountPath = "/var/lib/ceph/osd/ceph-" - blockPVCMapperInitContainer = "blkdevmapper" - blockEncryptionKMSGetKEKInitContainer = "encryption-kms-get-kek" - blockEncryptionOpenInitContainer = "encryption-open" - blockEncryptionOpenMetadataInitContainer = "encryption-open-metadata" - blockEncryptionOpenWalInitContainer = "encryption-open-wal" - blockPVCMapperEncryptionInitContainer = "blkdevmapper-encryption" - blockPVCMapperEncryptionMetadataInitContainer = "blkdevmapper-metadata-encryption" - blockPVCMapperEncryptionWalInitContainer = "blkdevmapper-wal-encryption" - blockPVCMetadataMapperInitContainer = "blkdevmapper-metadata" - blockPVCWalMapperInitContainer = "blkdevmapper-wal" - activatePVCOSDInitContainer = "activate" - expandPVCOSDInitContainer = "expand-bluefs" - expandEncryptedPVCOSDInitContainer = "expand-encrypted-bluefs" - encryptedPVCStatusOSDInitContainer = "encrypted-block-status" - encryptionKeyFileName = "luks_key" - // DmcryptBlockType is a portion of the device mapper name for the encrypted OSD on PVC block.db (rocksdb db) - DmcryptBlockType = "block-dmcrypt" - // DmcryptMetadataType is a portion of the device mapper name for the encrypted OSD on PVC block - DmcryptMetadataType = "db-dmcrypt" - // DmcryptWalType is a portion of the device mapper name for the encrypted OSD on PVC wal - DmcryptWalType = "wal-dmcrypt" - bluestoreBlockName = "block" - bluestoreMetadataName = "block.db" - bluestoreWalName = "block.wal" -) - -const ( - activateOSDOnNodeCode = ` -set -o errexit -set -o pipefail -set -o nounset # fail if variables are unset -set -o xtrace - -OSD_ID="$ROOK_OSD_ID" -OSD_UUID=%s -OSD_STORE_FLAG="%s" -OSD_DATA_DIR=/var/lib/ceph/osd/ceph-"$OSD_ID" -CV_MODE=%s -DEVICE="$%s" - -# create new keyring -ceph -n client.admin auth get-or-create osd."$OSD_ID" mon 'allow profile osd' mgr 'allow profile osd' osd 'allow *' -k /etc/ceph/admin-keyring-store/keyring - -# active the osd with ceph-volume -if [[ "$CV_MODE" == "lvm" ]]; then - TMP_DIR=$(mktemp -d) - - # activate osd - ceph-volume lvm activate --no-systemd "$OSD_STORE_FLAG" "$OSD_ID" "$OSD_UUID" - - # copy the tmpfs directory to a temporary directory - # this is needed because when the init container exits, the tmpfs goes away and its content with it - # this will result in the emptydir to be empty when accessed by the main osd container - cp --verbose --no-dereference "$OSD_DATA_DIR"/* "$TMP_DIR"/ - - # unmount the tmpfs since we don't need it anymore - umount "$OSD_DATA_DIR" - - # copy back the content of the tmpfs into the original osd directory - cp --verbose --no-dereference "$TMP_DIR"/* "$OSD_DATA_DIR" - - # retain ownership of files to the ceph user/group - chown --verbose --recursive ceph:ceph "$OSD_DATA_DIR" - - # remove the temporary directory - rm --recursive --force "$TMP_DIR" -else - # 'ceph-volume raw 
list' (which the osd-prepare job uses to report OSDs on nodes) - # returns user-friendly device names which can change when systems reboot. To - # keep OSD pods from crashing repeatedly after a reboot, we need to check if the - # block device we have is still correct, and if it isn't correct, we need to - # scan all the disks to find the right one. - OSD_LIST="$(mktemp)" - - function find_device() { - # jq would be preferable, but might be removed for hardened Ceph images - # python3 should exist in all containers having Ceph - python3 -c " -import sys, json -for _, info in json.load(sys.stdin).items(): - if info['osd_id'] == $OSD_ID: - print(info['device'], end='') - print('found device: ' + info['device'], file=sys.stderr) # log the disk we found to stderr - sys.exit(0) # don't keep processing once the disk is found -sys.exit('no disk found with OSD ID $OSD_ID') -" - } - - ceph-volume raw list "$DEVICE" > "$OSD_LIST" - cat "$OSD_LIST" - - if ! find_device < "$OSD_LIST"; then - ceph-volume raw list > "$OSD_LIST" - cat "$OSD_LIST" - - DEVICE="$(find_device < "$OSD_LIST")" - fi - [[ -z "$DEVICE" ]] && { echo "no device" ; exit 1 ; } - - # ceph-volume raw mode only supports bluestore so we don't need to pass a store flag - ceph-volume raw activate --device "$DEVICE" --no-systemd --no-tmpfs -fi -` - - openEncryptedBlock = ` -set -xe - -CEPH_FSID=%s -PVC_NAME=%s -KEY_FILE_PATH=%s -BLOCK_PATH=%s -DM_NAME=%s -DM_PATH=%s - -# Helps debugging -dmsetup version - -function open_encrypted_block { - echo "Opening encrypted device $BLOCK_PATH at $DM_PATH" - cryptsetup luksOpen --verbose --disable-keyring --allow-discards --key-file "$KEY_FILE_PATH" "$BLOCK_PATH" "$DM_NAME" - rm -f "$KEY_FILE_PATH" -} - -# This is done for upgraded clusters that did not have the subsystem and label set by the prepare job -function set_luks_subsystem_and_label { - echo "setting LUKS label and subsystem" - cryptsetup config $BLOCK_PATH --subsystem ceph_fsid="$CEPH_FSID" --label pvc_name="$PVC_NAME" -} - -if [ -b "$DM_PATH" ]; then - echo "Encrypted device $BLOCK_PATH already opened at $DM_PATH" - for field in $(dmsetup table "$DM_NAME"); do - if [[ "$field" =~ ^[0-9]+\:[0-9]+ ]]; then - underlaying_block="/sys/dev/block/$field" - if [ ! -d "$underlaying_block" ]; then - echo "Underlying block device $underlaying_block of crypt $DM_NAME disappeared!" 
- echo "Removing stale dm device $DM_NAME" - dmsetup remove --force "$DM_NAME" - open_encrypted_block - fi - fi - done -else - open_encrypted_block -fi - -# Setting label and subsystem on LUKS1 is not supported and the command will fail -if cryptsetup luksDump $BLOCK_PATH|grep -qEs "Version:.*2"; then - set_luks_subsystem_and_label -else - echo "LUKS version is not 2 so not setting label and subsystem" -fi -` - // #nosec G101 no leak just variable names - getKEKFromVaultWithToken = ` -# DO NOT RUN WITH -x TO AVOID LEAKING VAULT_TOKEN -set -e - -KEK_NAME=%s -KEY_PATH=%s -CURL_PAYLOAD=$(mktemp) -ARGS=(--silent --show-error --request GET --header "X-Vault-Token: ${VAULT_TOKEN//[$'\t\r\n']}") -PYTHON_DATA_PARSE="['data']" - -# If a vault namespace is set -if [ -n "$VAULT_NAMESPACE" ]; then - ARGS+=(--header "X-Vault-Namespace: ${VAULT_NAMESPACE}") -fi - -# If SSL is configured but self-signed CA is used -if [ -n "$VAULT_SKIP_VERIFY" ] && [[ "$VAULT_SKIP_VERIFY" == "true" ]]; then - ARGS+=(--insecure) -fi - -# TLS args -if [ -n "$VAULT_CACERT" ]; then - if [ -z "$VAULT_CLIENT_CERT" ] && [ -z "$VAULT_CLIENT_KEY" ]; then - ARGS+=(--cacert "${VAULT_CACERT}") - else - ARGS+=(--capath $(dirname "${VAULT_CACERT}")) - fi -fi -if [ -n "$VAULT_CLIENT_CERT" ]; then - ARGS+=(--cert "${VAULT_CLIENT_CERT}") -fi -if [ -n "$VAULT_CLIENT_KEY" ]; then - ARGS+=(--key "${VAULT_CLIENT_KEY}") -fi - -# For a request to any host/port, connect to VAULT_TLS_SERVER_NAME:requests original port instead -# Used for SNI validation and correct certificate matching -if [ -n "$VAULT_TLS_SERVER_NAME" ]; then - ARGS+=(--connect-to ::"${VAULT_TLS_SERVER_NAME}":) -fi - -# trim VAULT_BACKEND_PATH for last character '/' to avoid a redirect response from the server -VAULT_BACKEND_PATH="${VAULT_BACKEND_PATH%%/}" - -# Check KV engine version -if [[ "$VAULT_BACKEND" == "v2" ]]; then - PYTHON_DATA_PARSE="['data']['data']" - VAULT_BACKEND_PATH="$VAULT_BACKEND_PATH/data" -fi - - -# Get the Key Encryption Key -curl "${ARGS[@]}" "$VAULT_ADDR"/v1/"$VAULT_BACKEND_PATH"/"$KEK_NAME" > "$CURL_PAYLOAD" - -# Check for warnings in the payload -if warning=$(python3 -c "import sys, json; print(json.load(sys.stdin)[\"warnings\"], end='')" 2> /dev/null < "$CURL_PAYLOAD"); then - if [[ "$warning" != None ]]; then - # We could get a warning but it is not necessary an issue, so if there is no key we exit - if ! 
python3 -c "import sys, json; print(json.load(sys.stdin)${PYTHON_DATA_PARSE}[\"$KEK_NAME\"], end='')" &> /dev/null < "$CURL_PAYLOAD"; then - echo "no encryption key $KEK_NAME present in vault" - echo "$warning" - exit 1 - fi - fi -fi - -# Check for errors in the payload -if error=$(python3 -c "import sys, json; print(json.load(sys.stdin)[\"errors\"], end='')" 2> /dev/null < "$CURL_PAYLOAD"); then - echo "$error" - exit 1 -fi - -# Put the KEK in a file for cryptsetup to read -python3 -c "import sys, json; print(json.load(sys.stdin)${PYTHON_DATA_PARSE}[\"$KEK_NAME\"], end='')" < "$CURL_PAYLOAD" > "$KEY_PATH" - -# purge payload file -rm -f "$CURL_PAYLOAD" -` - - // If the disk identifier changes (different major and minor) we must force copy - // --remove-destination will remove each existing destination file before attempting to open it - // We **MUST** do this otherwise in environment where PVCs are dynamic, restarting the deployment will cause conflicts - // When restarting the OSD, the PVC block might end up with a different Kernel disk allocation - // For instance, prior to restart the block was mapped to 8:32 and when re-attached it was on 8:16 - // The previous "block" is still 8:32 so if we don't override it we will try to initialize on a disk that is not an OSD or worse another OSD - // This is mainly because in https://github.com/rook/rook/commit/ae8dcf7cc3b51cf8ca7da22f48b7a58887536c4f we switched to use HostPath to store the OSD data - // Since HostPath is not ephemeral, the block file must be re-hydrated each time the deployment starts - blockDevMapper = ` -set -xe - -PVC_SOURCE=%s -PVC_DEST=%s -CP_ARGS=(--archive --dereference --verbose) - -if [ -b "$PVC_DEST" ]; then - PVC_SOURCE_MAJ_MIN=$(stat --format '%%t%%T' $PVC_SOURCE) - PVC_DEST_MAJ_MIN=$(stat --format '%%t%%T' $PVC_DEST) - if [[ "$PVC_SOURCE_MAJ_MIN" == "$PVC_DEST_MAJ_MIN" ]]; then - CP_ARGS+=(--no-clobber) - else - echo "PVC's source major/minor numbers changed" - CP_ARGS+=(--remove-destination) - fi -fi - -cp "${CP_ARGS[@]}" "$PVC_SOURCE" "$PVC_DEST" -` -) - -// OSDs on PVC using a certain fast storage class need to do some tuning -var defaultTuneFastSettings = []string{ - "--osd-op-num-threads-per-shard=2", // Default value of osd_op_num_threads_per_shard for SSDs - "--osd-op-num-shards=8", // Default value of osd_op_num_shards for SSDs - "--osd-recovery-sleep=0", // Time in seconds to sleep before next recovery or backfill op for SSDs - "--osd-snap-trim-sleep=0", // Time in seconds to sleep before next snap trim for SSDs - "--osd-delete-sleep=0", // Time in seconds to sleep before next removal transaction for SSDs - "--bluestore-min-alloc-size=4096", // Default min_alloc_size value for SSDs - "--bluestore-prefer-deferred-size=0", // Default value of bluestore_prefer_deferred_size for SSDs - "--bluestore-compression-min-blob-size=8912", // Default value of bluestore_compression_min_blob_size for SSDs - "--bluestore-compression-max-blob-size=65536", // Default value of bluestore_compression_max_blob_size for SSDs - "--bluestore-max-blob-size=65536", // Default value of bluestore_max_blob_size for SSDs - "--bluestore-cache-size=3221225472", // Default value of bluestore_cache_size for SSDs - "--bluestore-throttle-cost-per-io=4000", // Default value of bluestore_throttle_cost_per_io for SSDs - "--bluestore-deferred-batch-ops=16", // Default value of bluestore_deferred_batch_ops for SSDs -} - -// OSDs on PVC using a certain slow storage class need to do some tuning -var defaultTuneSlowSettings = []string{ - 
"--osd-recovery-sleep=0.1", // Time in seconds to sleep before next recovery or backfill op - "--osd-snap-trim-sleep=2", // Time in seconds to sleep before next snap trim - "--osd-delete-sleep=2", // Time in seconds to sleep before next removal transaction -} - -func deploymentName(osdID int) string { - return fmt.Sprintf(osdAppNameFmt, osdID) -} - -func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo, provisionConfig *provisionConfig) (*apps.Deployment, error) { - // If running on Octopus, we don't need to use the host PID namespace - var hostPID = !c.clusterInfo.CephVersion.IsAtLeastOctopus() - deploymentName := deploymentName(osd.ID) - replicaCount := int32(1) - volumeMounts := controller.CephVolumeMounts(provisionConfig.DataPathMap, false) - configVolumeMounts := controller.RookVolumeMounts(provisionConfig.DataPathMap, false) - // When running on PVC, the OSDs don't need a bindmount on dataDirHostPath, only the monitors do - dataDirHostPath := c.spec.DataDirHostPath - if osdProps.onPVC() { - dataDirHostPath = "" - } - volumes := controller.PodVolumes(provisionConfig.DataPathMap, dataDirHostPath, false) - failureDomainValue := osdProps.crushHostname - doConfigInit := true // initialize ceph.conf in init container? - doBinaryCopyInit := true // copy tini and rook binaries in an init container? - - // This property is used for both PVC and non-PVC use case - if osd.CVMode == "" { - return nil, errors.Errorf("failed to generate deployment for OSD %d. required CVMode is not specified for this OSD", osd.ID) - } - - dataDir := k8sutil.DataDir - // Create volume config for /dev so the pod can access devices on the host - // Only valid when running OSD on device or OSD on LV-backed PVC - if !osdProps.onPVC() || osd.CVMode == "lvm" { - devVolume := v1.Volume{Name: "devices", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev"}}} - volumes = append(volumes, devVolume) - devMount := v1.VolumeMount{Name: "devices", MountPath: "/dev"} - volumeMounts = append(volumeMounts, devMount) - } - - // If the OSD runs on PVC - if osdProps.onPVC() { - // Create volume config for PVCs - volumes = append(volumes, getPVCOSDVolumes(&osdProps, c.spec.DataDirHostPath, c.clusterInfo.Namespace, false)...) - // If encrypted let's add the secret key mount path - if osdProps.encrypted && osd.CVMode == "raw" { - encryptedVol, _ := c.getEncryptionVolume(osdProps) - volumes = append(volumes, encryptedVol) - // We don't need to pass the Volume with projection for TLS when TLS is not enabled - // Somehow when this happens and we try to update a deployment spec it fails with: - // ValidationError(Pod.spec.volumes[7].projected): missing required field "sources" - if c.spec.Security.KeyManagementService.IsEnabled() && c.spec.Security.KeyManagementService.IsTLSEnabled() { - encryptedVol, _ := kms.VaultVolumeAndMount(c.spec.Security.KeyManagementService.ConnectionDetails) - volumes = append(volumes, encryptedVol) - } - } - } - - if len(volumes) == 0 { - return nil, errors.New("empty volumes") - } - - osdID := strconv.Itoa(osd.ID) - tiniEnvVar := v1.EnvVar{Name: "TINI_SUBREAPER", Value: ""} - envVars := append(c.getConfigEnvVars(osdProps, dataDir), []v1.EnvVar{ - tiniEnvVar, - }...) - envVars = append(envVars, k8sutil.ClusterDaemonEnvVars(c.spec.CephVersion.Image)...) 
- envVars = append(envVars, []v1.EnvVar{ - {Name: "ROOK_OSD_UUID", Value: osd.UUID}, - {Name: "ROOK_OSD_ID", Value: osdID}, - {Name: "ROOK_CEPH_MON_HOST", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{ - Name: "rook-ceph-config"}, - Key: "mon_host"}}}, - {Name: "CEPH_ARGS", Value: "-m $(ROOK_CEPH_MON_HOST)"}, - blockPathEnvVariable(osd.BlockPath), - cvModeEnvVariable(osd.CVMode), - dataDeviceClassEnvVar(osd.DeviceClass), - }...) - configEnvVars := append(c.getConfigEnvVars(osdProps, dataDir), []v1.EnvVar{ - tiniEnvVar, - {Name: "ROOK_OSD_ID", Value: osdID}, - {Name: "ROOK_CEPH_VERSION", Value: c.clusterInfo.CephVersion.CephVersionFormatted()}, - {Name: "ROOK_IS_DEVICE", Value: "true"}, - getTcmallocMaxTotalThreadCacheBytes(""), - }...) - - var command []string - var args []string - // If the OSD was prepared with ceph-volume and running on PVC and using the LVM mode - if osdProps.onPVC() && osd.CVMode == "lvm" { - // if the osd was provisioned by ceph-volume, we need to launch it with rook as the parent process - command = []string{path.Join(rookBinariesMountPath, "tini")} - args = []string{ - "--", path.Join(rookBinariesMountPath, "rook"), - "ceph", "osd", "start", - "--", - "--foreground", - "--id", osdID, - "--fsid", c.clusterInfo.FSID, - "--cluster", "ceph", - "--setuser", "ceph", - "--setgroup", "ceph", - fmt.Sprintf("--crush-location=%s", osd.Location), - } - } else if osdProps.onPVC() && osd.CVMode == "raw" { - doBinaryCopyInit = false - doConfigInit = false - command = []string{"ceph-osd"} - args = []string{ - "--foreground", - "--id", osdID, - "--fsid", c.clusterInfo.FSID, - "--setuser", "ceph", - "--setgroup", "ceph", - fmt.Sprintf("--crush-location=%s", osd.Location), - } - } else { - doBinaryCopyInit = false - doConfigInit = false - command = []string{"ceph-osd"} - args = []string{ - "--foreground", - "--id", osdID, - "--fsid", c.clusterInfo.FSID, - "--setuser", "ceph", - "--setgroup", "ceph", - fmt.Sprintf("--crush-location=%s", osd.Location), - } - } - - // Ceph expects initial weight as float value in tera-bytes units - if osdProps.storeConfig.InitialWeight != "" { - args = append(args, fmt.Sprintf("--osd-crush-initial-weight=%s", osdProps.storeConfig.InitialWeight)) - } - - // If the OSD runs on PVC - if osdProps.onPVC() { - // add the PVC size to the pod spec so that if the size changes the OSD will be restarted and pick up the change - envVars = append(envVars, v1.EnvVar{Name: "ROOK_OSD_PVC_SIZE", Value: osdProps.pvcSize}) - // if the pod is portable, keep track of the topology affinity - if osdProps.portable { - envVars = append(envVars, v1.EnvVar{Name: "ROOK_TOPOLOGY_AFFINITY", Value: osd.TopologyAffinity}) - } - - // Append slow tuning flag if necessary - if osdProps.tuneSlowDeviceClass { - args = append(args, defaultTuneSlowSettings...) - } else if osdProps.tuneFastDeviceClass { // Append fast tuning flag if necessary - args = append(args, defaultTuneFastSettings...) 
- } - } - - // The osd itself needs to talk to udev to report information about the device (vendor/serial etc) - udevVolume, udevVolumeMount := getUdevVolume() - volumes = append(volumes, udevVolume) - volumeMounts = append(volumeMounts, udevVolumeMount) - - // If the PV is encrypted let's mount the device mapper path - if osdProps.encrypted { - dmVol, dmVolMount := getDeviceMapperVolume() - volumes = append(volumes, dmVol) - volumeMounts = append(volumeMounts, dmVolMount) - } - - // Add the volume to the spec and the mount to the daemon container - copyBinariesVolume, copyBinariesContainer := c.getCopyBinariesContainer() - if doBinaryCopyInit { - volumes = append(volumes, copyBinariesVolume) - volumeMounts = append(volumeMounts, copyBinariesContainer.VolumeMounts[0]) - } - - // Add the volume to the spec and the mount to the daemon container - // so that it can pick the already mounted/activated osd metadata path - // This container will activate the OSD and place the activated filesinto an empty dir - // The empty dir will be shared by the "activate-osd" pod and the "osd" main pod - activateOSDVolume, activateOSDContainer := c.getActivateOSDInitContainer(c.spec.DataDirHostPath, c.clusterInfo.Namespace, osdID, osd, osdProps) - if !osdProps.onPVC() { - volumes = append(volumes, activateOSDVolume...) - volumeMounts = append(volumeMounts, activateOSDContainer.VolumeMounts[0]) - } - - args = append(args, opconfig.LoggingFlags()...) - args = append(args, osdOnSDNFlag(c.spec.Network)...) - args = append(args, controller.NetworkBindingFlags(c.clusterInfo, &c.spec)...) - - osdDataDirPath := activateOSDMountPath + osdID - if osdProps.onPVC() && osd.CVMode == "lvm" { - // Let's use the old bridge for these lvm based pvc osds - volumeMounts = append(volumeMounts, getPvcOSDBridgeMount(osdProps.pvc.ClaimName)) - envVars = append(envVars, pvcBackedOSDEnvVar("true")) - envVars = append(envVars, lvBackedPVEnvVar(strconv.FormatBool(osd.LVBackedPV))) - } - - if osdProps.onPVC() && osd.CVMode == "raw" { - volumeMounts = append(volumeMounts, getPvcOSDBridgeMountActivate(osdDataDirPath, osdProps.pvc.ClaimName)) - envVars = append(envVars, pvcBackedOSDEnvVar("true")) - } - - // We cannot go un-privileged until we have a bindmount for logs and crash - // OpenShift requires privileged containers for that - // If we remove those OSD on PVC with raw mode won't need to be privileged - // We could try to run as ceph too, more investigations needed - privileged := true - runAsUser := int64(0) - readOnlyRootFilesystem := false - securityContext := &v1.SecurityContext{ - Privileged: &privileged, - RunAsUser: &runAsUser, - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, - } - - // needed for luksOpen synchronization when devices are encrypted and the osd is prepared with LVM - hostIPC := osdProps.storeConfig.EncryptedDevice || osdProps.encrypted - - initContainers := make([]v1.Container, 0, 4) - if doConfigInit { - initContainers = append(initContainers, - v1.Container{ - Args: []string{"ceph", "osd", "init"}, - Name: controller.ConfigInitContainerName, - Image: c.rookVersion, - VolumeMounts: configVolumeMounts, - Env: configEnvVars, - SecurityContext: securityContext, - }) - } - if doBinaryCopyInit { - initContainers = append(initContainers, *copyBinariesContainer) - } - - if osdProps.onPVC() && osd.CVMode == "lvm" { - initContainers = append(initContainers, c.getPVCInitContainer(osdProps)) - } else if osdProps.onPVC() && osd.CVMode == "raw" { - // Copy main block device to an empty dir - initContainers = 
append(initContainers, c.getPVCInitContainerActivate(osdDataDirPath, osdProps))
- // Copy main block.db device to an empty dir
- if osdProps.onPVCWithMetadata() {
- initContainers = append(initContainers, c.getPVCMetadataInitContainerActivate(osdDataDirPath, osdProps))
- }
- // Copy main block.wal device to an empty dir
- if osdProps.onPVCWithWal() {
- initContainers = append(initContainers, c.getPVCWalInitContainerActivate(osdDataDirPath, osdProps))
- }
- if osdProps.encrypted {
- // Open the encrypted disk
- initContainers = append(initContainers, c.getPVCEncryptionOpenInitContainerActivate(osdDataDirPath, osdProps)...)
- // Copy the encrypted block to the osd data location, e.g. /var/lib/ceph/osd/ceph-0/block
- initContainers = append(initContainers, c.getPVCEncryptionInitContainerActivate(osdDataDirPath, osdProps)...)
- // Print the encrypted block status
- initContainers = append(initContainers, c.getEncryptedStatusPVCInitContainer(osdDataDirPath, osdProps))
- // Resize the encrypted device if necessary, this must be done after the encrypted block is opened
- initContainers = append(initContainers, c.getExpandEncryptedPVCInitContainer(osdDataDirPath, osdProps))
- }
- initContainers = append(initContainers, c.getActivatePVCInitContainer(osdProps, osdID))
- initContainers = append(initContainers, c.getExpandPVCInitContainer(osdProps, osdID))
- } else {
- initContainers = append(initContainers, *activateOSDContainer)
- }
-
- // For OSD on PVC with LVM the directory does not exist yet,
- // it gets created by the 'ceph-volume lvm activate' command.
- //
- // For OSD on non-PVC the directory has already been created and chowned by the 'activate' container,
- // so we don't need to chown it again.
- dataPath := ""
-
- // Raw mode on PVC needs this path so that the OSD's metadata files can be chowned after 'ceph-bluestore-tool' ran
- if osd.CVMode == "raw" && osdProps.onPVC() {
- dataPath = activateOSDMountPath + osdID
- }
-
- // Doing a chown in a post start lifecycle hook does not reliably complete before the OSD
- // process starts, which can cause the pod to fail without the lifecycle hook's chown command
- // completing. It can take an arbitrarily long time for a pod restart to successfully chown the
- // directory. This is a race condition for all OSDs; therefore, do this in an init container.
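Because the chown has to run in an init container rather than a postStart hook (the hook races with the ceph-osd process, as described above), a minimal self-contained sketch of such a container follows. The container name and the ceph image tag appear in the tests removed later in this diff; the chowned paths and the ceph:ceph owner are assumptions here, not taken from Rook's helper:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    // chownInitContainer returns an init container that fixes ownership of the Ceph
    // data and log directories before ceph-osd starts, avoiding the postStart race.
    func chownInitContainer(image string, mounts []v1.VolumeMount) v1.Container {
        privileged := true
        runAsUser := int64(0) // chown must run as root
        return v1.Container{
            Name:    "chown-container-data-dir",
            Image:   image,
            Command: []string{"chown"},
            Args: []string{
                "--verbose", "--recursive", "ceph:ceph",
                "/var/log/ceph", "/var/lib/ceph/crash",
            },
            VolumeMounts: mounts,
            SecurityContext: &v1.SecurityContext{
                Privileged: &privileged,
                RunAsUser:  &runAsUser,
            },
        }
    }

    func main() {
        c := chownInitContainer("quay.io/ceph/ceph:v15", nil)
        fmt.Println(c.Name, c.Command, c.Args)
    }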
- // See more discussion here: https://github.com/rook/rook/pull/3594#discussion_r312279176 - initContainers = append(initContainers, - controller.ChownCephDataDirsInitContainer( - opconfig.DataPathMap{ContainerDataDir: dataPath}, - c.spec.CephVersion.Image, - volumeMounts, - osdProps.resources, - securityContext, - )) - - podTemplateSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: AppName, - Labels: c.getOSDLabels(osd, failureDomainValue, osdProps.portable), - }, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyAlways, - ServiceAccountName: serviceAccountName, - HostNetwork: c.spec.Network.IsHost(), - HostPID: hostPID, - HostIPC: hostIPC, - PriorityClassName: cephv1.GetOSDPriorityClassName(c.spec.PriorityClassNames), - InitContainers: initContainers, - Containers: []v1.Container{ - { - Command: command, - Args: args, - Name: "osd", - Image: c.spec.CephVersion.Image, - VolumeMounts: volumeMounts, - Env: envVars, - Resources: osdProps.resources, - SecurityContext: securityContext, - LivenessProbe: controller.GenerateLivenessProbeExecDaemon(opconfig.OsdType, osdID), - WorkingDir: opconfig.VarLogCephDir, - }, - }, - Volumes: volumes, - SchedulerName: osdProps.schedulerName, - }, - } - - // If the log collector is enabled we add the side-car container - if c.spec.LogCollector.Enabled { - // If HostPID is already enabled we don't need to activate shareProcessNamespace since all pods already see each others - if !podTemplateSpec.Spec.HostPID { - shareProcessNamespace := true - podTemplateSpec.Spec.ShareProcessNamespace = &shareProcessNamespace - } - podTemplateSpec.Spec.Containers = append(podTemplateSpec.Spec.Containers, *controller.LogCollectorContainer(fmt.Sprintf("ceph-osd.%s", osdID), c.clusterInfo.Namespace, c.spec)) - } - - // If the liveness probe is enabled - podTemplateSpec.Spec.Containers[0] = opconfig.ConfigureLivenessProbe(cephv1.KeyOSD, podTemplateSpec.Spec.Containers[0], c.spec.HealthCheck) - - if c.spec.Network.IsHost() { - podTemplateSpec.Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } else if c.spec.Network.IsMultus() { - if err := k8sutil.ApplyMultus(c.spec.Network, &podTemplateSpec.ObjectMeta); err != nil { - return nil, err - } - } - - k8sutil.RemoveDuplicateEnvVars(&podTemplateSpec.Spec) - - deployment := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: c.clusterInfo.Namespace, - Labels: podTemplateSpec.Labels, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - k8sutil.AppAttr: AppName, - k8sutil.ClusterAttr: c.clusterInfo.Namespace, - OsdIdLabelKey: fmt.Sprintf("%d", osd.ID), - }, - }, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - Template: podTemplateSpec, - Replicas: &replicaCount, - }, - } - if osdProps.onPVC() { - k8sutil.AddLabelToDeployment(OSDOverPVCLabelKey, osdProps.pvc.ClaimName, deployment) - k8sutil.AddLabelToDeployment(CephDeviceSetLabelKey, osdProps.deviceSetName, deployment) - k8sutil.AddLabelToPod(OSDOverPVCLabelKey, osdProps.pvc.ClaimName, &deployment.Spec.Template) - k8sutil.AddLabelToPod(CephDeviceSetLabelKey, osdProps.deviceSetName, &deployment.Spec.Template) - } - if !osdProps.portable { - deployment.Spec.Template.Spec.NodeSelector = map[string]string{v1.LabelHostname: osdProps.crushHostname} - } - // Replace default unreachable node toleration if the osd pod is portable and based in PVC - if osdProps.onPVC() && osdProps.portable { - 
k8sutil.AddUnreachableNodeToleration(&deployment.Spec.Template.Spec)
- }
-
- k8sutil.AddRookVersionLabelToDeployment(deployment)
- cephv1.GetOSDAnnotations(c.spec.Annotations).ApplyToObjectMeta(&deployment.ObjectMeta)
- cephv1.GetOSDAnnotations(c.spec.Annotations).ApplyToObjectMeta(&deployment.Spec.Template.ObjectMeta)
- cephv1.GetOSDLabels(c.spec.Labels).ApplyToObjectMeta(&deployment.ObjectMeta)
- cephv1.GetOSDLabels(c.spec.Labels).ApplyToObjectMeta(&deployment.Spec.Template.ObjectMeta)
- controller.AddCephVersionLabelToDeployment(c.clusterInfo.CephVersion, deployment)
- controller.AddCephVersionLabelToDeployment(c.clusterInfo.CephVersion, deployment)
- err := c.clusterInfo.OwnerInfo.SetControllerReference(deployment)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to set owner reference to osd deployment %q", deployment.Name)
- }
-
- if osdProps.onPVC() {
- c.applyAllPlacementIfNeeded(&deployment.Spec.Template.Spec)
- // apply storageClassDeviceSets.Placement
- osdProps.placement.ApplyToPodSpec(&deployment.Spec.Template.Spec)
- } else {
- c.applyAllPlacementIfNeeded(&deployment.Spec.Template.Spec)
- // apply c.spec.Placement.osd
- c.spec.Placement[cephv1.KeyOSD].ApplyToPodSpec(&deployment.Spec.Template.Spec)
- }
-
- // portable OSDs must have affinity to the topology where the osd prepare job was executed
- if osdProps.portable {
- if err := applyTopologyAffinity(&deployment.Spec.Template.Spec, osd); err != nil {
- return nil, err
- }
- }
-
- // Change TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES if the OSD has been annotated with a value
- osdAnnotations := cephv1.GetOSDAnnotations(c.spec.Annotations)
- tcmallocMaxTotalThreadCacheBytes, ok := osdAnnotations[tcmallocMaxTotalThreadCacheBytesEnv]
- if ok && tcmallocMaxTotalThreadCacheBytes != "" {
- deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, getTcmallocMaxTotalThreadCacheBytes(tcmallocMaxTotalThreadCacheBytes))
- }
-
- return deployment, nil
-}
-
-// applyAllPlacementIfNeeded applies spec.placement.all when OnlyApplyOSDPlacement is set to false
-func (c *Cluster) applyAllPlacementIfNeeded(d *v1.PodSpec) {
- // The placement for OSDs is computed from several different places:
- // - For non-PVCs: `placement.all` and `placement.osd`
- // - For PVCs: `placement.all` and, inside the storageClassDeviceSet, the `placement` or `preparePlacement`
-
- // By default (when onlyApplyOSDPlacement is false) the NodeAffinity and Tolerations from these sources are merged;
- // for other placement rules such as PodAffinity and PodAntiAffinity, the placement applied last overrides the one applied before it.
- // See ApplyToPodSpec().
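A short sketch of the merge described in that comment, using only names that appear in this diff (cephv1.PlacementSpec, cephv1.KeyOSD, All(), ApplyToPodSpec()); the surrounding program and the role values are illustrative. Applying placement.all first and the OSD placement second leaves both node-affinity match expressions on the pod spec, which is the same merge the TestOSDPlacement test removed later in this diff checks:

    package main

    import (
        "fmt"

        cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
        v1 "k8s.io/api/core/v1"
    )

    // nodeAffinityOn builds a required node affinity on a "role" label value.
    func nodeAffinityOn(role string) *v1.NodeAffinity {
        return &v1.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                NodeSelectorTerms: []v1.NodeSelectorTerm{{
                    MatchExpressions: []v1.NodeSelectorRequirement{{
                        Key:      "role",
                        Operator: v1.NodeSelectorOpIn,
                        Values:   []string{role},
                    }},
                }},
            },
        }
    }

    func main() {
        placement := cephv1.PlacementSpec{
            "all":         {NodeAffinity: nodeAffinityOn("storage-node")},
            cephv1.KeyOSD: {NodeAffinity: nodeAffinityOn("osd-node")},
        }

        podSpec := v1.PodSpec{}
        // placement.all first, then placement.osd: the node affinities are merged,
        // so the pod spec ends up with both match expressions.
        placement.All().ApplyToPodSpec(&podSpec)
        placement[cephv1.KeyOSD].ApplyToPodSpec(&podSpec)

        terms := podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
        fmt.Println(len(terms[0].MatchExpressions)) // 2: both expressions merged into the first term
    }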
- - // apply spec.placement.all when spec.Storage.OnlyApplyOSDPlacement is false - if !c.spec.Storage.OnlyApplyOSDPlacement { - c.spec.Placement.All().ApplyToPodSpec(d) - } -} - -func applyTopologyAffinity(spec *v1.PodSpec, osd OSDInfo) error { - if osd.TopologyAffinity == "" { - logger.Debugf("no topology affinity to set for osd %d", osd.ID) - return nil - } - logger.Infof("assigning osd %d topology affinity to %q", osd.ID, osd.TopologyAffinity) - nodeAffinity, err := k8sutil.GenerateNodeAffinity(osd.TopologyAffinity) - if err != nil { - return errors.Wrapf(err, "failed to generate osd %d topology affinity", osd.ID) - } - // merge the node affinity for the topology with the existing affinity - p := cephv1.Placement{NodeAffinity: nodeAffinity} - p.ApplyToPodSpec(spec) - - return nil -} - -// To get rook inside the container, the config init container needs to copy "tini" and "rook" binaries into a volume. -// Get the config flag so rook will copy the binaries and create the volume and mount that will be shared between -// the init container and the daemon container -func (c *Cluster) getCopyBinariesContainer() (v1.Volume, *v1.Container) { - volume := v1.Volume{Name: rookBinariesVolumeName, VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}} - mount := v1.VolumeMount{Name: rookBinariesVolumeName, MountPath: rookBinariesMountPath} - - return volume, &v1.Container{ - Args: []string{ - "copy-binaries", - "--copy-to-dir", rookBinariesMountPath}, - Name: "copy-bins", - Image: c.rookVersion, - VolumeMounts: []v1.VolumeMount{mount}, - } -} - -// This container runs all the actions needed to activate an OSD before we can run the OSD process -func (c *Cluster) getActivateOSDInitContainer(configDir, namespace, osdID string, osdInfo OSDInfo, osdProps osdProperties) ([]v1.Volume, *v1.Container) { - // We need to use hostPath because the same reason as written in the comment of getDataBridgeVolumeSource() - - hostPathType := v1.HostPathDirectoryOrCreate - source := v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: filepath.Join( - configDir, - namespace, - c.clusterInfo.FSID+"_"+osdInfo.UUID, - ), - Type: &hostPathType, - }, - } - volume := []v1.Volume{ - { - Name: activateOSDVolumeName, - VolumeSource: source, - }, - } - - adminKeyringVol, adminKeyringVolMount := cephkey.Volume().Admin(), cephkey.VolumeMount().Admin() - volume = append(volume, adminKeyringVol) - - envVars := append( - osdActivateEnvVar(), - blockPathEnvVariable(osdInfo.BlockPath), - metadataDeviceEnvVar(osdInfo.MetadataPath), - walDeviceEnvVar(osdInfo.WalPath), - v1.EnvVar{Name: "ROOK_OSD_ID", Value: osdID}, - ) - osdStore := "--bluestore" - - // Build empty dir osd path to something like "/var/lib/ceph/osd/ceph-0" - activateOSDMountPathID := activateOSDMountPath + osdID - - volMounts := []v1.VolumeMount{ - {Name: activateOSDVolumeName, MountPath: activateOSDMountPathID}, - {Name: "devices", MountPath: "/dev"}, - {Name: k8sutil.ConfigOverrideName, ReadOnly: true, MountPath: opconfig.EtcCephDir}, - } - volMounts = append(volMounts, adminKeyringVolMount) - - if osdProps.onPVC() { - volMounts = append(volMounts, getPvcOSDBridgeMount(osdProps.pvc.ClaimName)) - } - - container := &v1.Container{ - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(activateOSDOnNodeCode, osdInfo.UUID, osdStore, osdInfo.CVMode, blockPathVarName), - }, - Name: "activate", - Image: c.spec.CephVersion.Image, - VolumeMounts: volMounts, - SecurityContext: PrivilegedContext(), - Env: envVars, - Resources: osdProps.resources, - } - 
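The copy-binaries pattern described above for getCopyBinariesContainer boils down to an emptyDir shared between an init container that copies tini and rook into it and the daemon container that executes them from /rook. A rough sketch of that wiring; the container name, arguments and the /rook mount path are the ones visible in this diff, while the volume name and operator image are placeholders:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    // copyBinariesPair returns the shared emptyDir, the init container that fills it,
    // and the mount the daemon container reuses so /rook/tini and /rook/rook exist there.
    func copyBinariesPair(operatorImage string) (v1.Volume, v1.Container, v1.VolumeMount) {
        vol := v1.Volume{
            Name:         "rook-binaries",
            VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
        }
        mount := v1.VolumeMount{Name: vol.Name, MountPath: "/rook"}
        init := v1.Container{
            Name:         "copy-bins",
            Image:        operatorImage,
            Args:         []string{"copy-binaries", "--copy-to-dir", "/rook"},
            VolumeMounts: []v1.VolumeMount{mount},
        }
        return vol, init, mount
    }

    func main() {
        vol, init, mount := copyBinariesPair("rook/rook:myversion")
        fmt.Println(vol.Name, init.Name, mount.MountPath)
    }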
- return volume, container -} - -// Currently we can't mount a block mode pv directly to a privileged container -// So we mount it to a non privileged init container and then copy it to a common directory mounted inside init container -// and the privileged provision container. -func (c *Cluster) getPVCInitContainer(osdProps osdProperties) v1.Container { - return v1.Container{ - Name: blockPVCMapperInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(blockDevMapper, fmt.Sprintf("/%s", osdProps.pvc.ClaimName), fmt.Sprintf("/mnt/%s", osdProps.pvc.ClaimName)), - }, - VolumeDevices: []v1.VolumeDevice{ - { - Name: osdProps.pvc.ClaimName, - DevicePath: fmt.Sprintf("/%s", osdProps.pvc.ClaimName), - }, - }, - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMount(osdProps.pvc.ClaimName)}, - SecurityContext: controller.PodSecurityContext(), - Resources: osdProps.resources, - } -} - -func (c *Cluster) getPVCInitContainerActivate(mountPath string, osdProps osdProperties) v1.Container { - cpDestinationName := path.Join(mountPath, bluestoreBlockName) - // Encrypted is a special - // We have an initial "cp" to copy the pvc to an empty dir, typically we copy it in /var/lib/ceph/osd/ceph-0/block - // BUT we encryption we need a second block copy, the copy of the opened encrypted block which ultimately will be at /var/lib/ceph/osd/ceph-0/block - // So when encrypted we first copy to /var/lib/ceph/osd/ceph-0/block-tmp - // Then open the encrypted block and finally copy it to /var/lib/ceph/osd/ceph-0/block - // If we don't do this "cp" will fail to copy the special block file - if osdProps.encrypted { - cpDestinationName = encryptionBlockDestinationCopy(mountPath, bluestoreBlockName) - } - - return v1.Container{ - Name: blockPVCMapperInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(blockDevMapper, fmt.Sprintf("/%s", osdProps.pvc.ClaimName), cpDestinationName), - }, - VolumeDevices: []v1.VolumeDevice{ - { - Name: osdProps.pvc.ClaimName, - DevicePath: fmt.Sprintf("/%s", osdProps.pvc.ClaimName), - }, - }, - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)}, - SecurityContext: controller.PodSecurityContext(), - Resources: osdProps.resources, - } -} - -func (c *Cluster) generateEncryptionOpenBlockContainer(resources v1.ResourceRequirements, containerName, pvcName, volumeMountPVCName, cryptBlockType, blockType, mountPath string) v1.Container { - return v1.Container{ - Name: containerName, - Image: c.spec.CephVersion.Image, - // Running via bash allows us to check whether the device is already opened or not - // If we don't the cryptsetup command will fail saying the device is already opened - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(openEncryptedBlock, c.clusterInfo.FSID, pvcName, encryptionKeyPath(), encryptionBlockDestinationCopy(mountPath, blockType), encryptionDMName(pvcName, cryptBlockType), encryptionDMPath(pvcName, cryptBlockType)), - }, - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, volumeMountPVCName), getDeviceMapperMount()}, - SecurityContext: PrivilegedContext(), - Resources: resources, - } -} - -func (c *Cluster) generateVaultGetKEK(osdProps osdProperties) v1.Container { - return v1.Container{ - Name: blockEncryptionKMSGetKEKInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(getKEKFromVaultWithToken, 
kms.GenerateOSDEncryptionSecretName(osdProps.pvc.ClaimName), encryptionKeyPath()), - }, - Env: kms.VaultConfigToEnvVar(c.spec), - Resources: osdProps.resources, - } -} - -func (c *Cluster) getPVCEncryptionOpenInitContainerActivate(mountPath string, osdProps osdProperties) []v1.Container { - containers := []v1.Container{} - - // If a KMS is enabled we need to add an init container to fetch the KEK - if c.spec.Security.KeyManagementService.IsEnabled() { - kmsProvider := kms.GetParam(c.spec.Security.KeyManagementService.ConnectionDetails, kms.Provider) - // Get Vault KEK from KMS container - if kmsProvider == secrets.TypeVault { - if c.spec.Security.KeyManagementService.IsTokenAuthEnabled() { - getKEKFromKMSContainer := c.generateVaultGetKEK(osdProps) - - // Volume mount to store the encrypted key - _, volMount := c.getEncryptionVolume(osdProps) - getKEKFromKMSContainer.VolumeMounts = append(getKEKFromKMSContainer.VolumeMounts, volMount) - - // Now let's see if there is a TLS config we need to mount as well - if c.spec.Security.KeyManagementService.IsTLSEnabled() { - _, vaultVolMount := kms.VaultVolumeAndMount(c.spec.Security.KeyManagementService.ConnectionDetails) - getKEKFromKMSContainer.VolumeMounts = append(getKEKFromKMSContainer.VolumeMounts, vaultVolMount) - } - - // Add the container to the list of containers - containers = append(containers, getKEKFromKMSContainer) - } - } - } - - // Main block container - blockContainer := c.generateEncryptionOpenBlockContainer(osdProps.resources, blockEncryptionOpenInitContainer, osdProps.pvc.ClaimName, osdProps.pvc.ClaimName, DmcryptBlockType, bluestoreBlockName, mountPath) - _, volMount := c.getEncryptionVolume(osdProps) - blockContainer.VolumeMounts = append(blockContainer.VolumeMounts, volMount) - containers = append(containers, blockContainer) - - // If there is a metadata PVC - if osdProps.onPVCWithMetadata() { - metadataContainer := c.generateEncryptionOpenBlockContainer(osdProps.resources, blockEncryptionOpenMetadataInitContainer, osdProps.metadataPVC.ClaimName, osdProps.pvc.ClaimName, DmcryptMetadataType, bluestoreMetadataName, mountPath) - // We use the same key for both block and block.db so we must use osdProps.pvc.ClaimName for the getEncryptionVolume() - _, volMount := c.getEncryptionVolume(osdProps) - metadataContainer.VolumeMounts = append(metadataContainer.VolumeMounts, volMount) - containers = append(containers, metadataContainer) - } - - // If there is a wal PVC - if osdProps.onPVCWithWal() { - metadataContainer := c.generateEncryptionOpenBlockContainer(osdProps.resources, blockEncryptionOpenWalInitContainer, osdProps.walPVC.ClaimName, osdProps.pvc.ClaimName, DmcryptWalType, bluestoreWalName, mountPath) - // We use the same key for both block and block.db so we must use osdProps.pvc.ClaimName for the getEncryptionVolume() - _, volMount := c.getEncryptionVolume(osdProps) - metadataContainer.VolumeMounts = append(metadataContainer.VolumeMounts, volMount) - containers = append(containers, metadataContainer) - } - - return containers -} - -func (c *Cluster) generateEncryptionCopyBlockContainer(resources v1.ResourceRequirements, containerName, pvcName, mountPath, volumeMountPVCName, blockName, blockType string) v1.Container { - return v1.Container{ - Name: containerName, - Image: c.spec.CephVersion.Image, - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(blockDevMapper, encryptionDMPath(pvcName, blockType), path.Join(mountPath, blockName)), - }, - // volumeMountPVCName is crucial, especially when the block we copy is the 
metadata block - // its value must be the name of the block PV so that all init containers use the same bridge (the emptyDir shared by all the init containers) - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, volumeMountPVCName), getDeviceMapperMount()}, - SecurityContext: controller.PodSecurityContext(), - Resources: resources, - } -} - -func (c *Cluster) getPVCEncryptionInitContainerActivate(mountPath string, osdProps osdProperties) []v1.Container { - containers := []v1.Container{} - containers = append(containers, c.generateEncryptionCopyBlockContainer(osdProps.resources, blockPVCMapperEncryptionInitContainer, osdProps.pvc.ClaimName, mountPath, osdProps.pvc.ClaimName, bluestoreBlockName, DmcryptBlockType)) - - // If there is a metadata PVC - if osdProps.metadataPVC.ClaimName != "" { - containers = append(containers, c.generateEncryptionCopyBlockContainer(osdProps.resources, blockPVCMapperEncryptionMetadataInitContainer, osdProps.metadataPVC.ClaimName, mountPath, osdProps.pvc.ClaimName, bluestoreMetadataName, DmcryptMetadataType)) - } - - // If there is a wal PVC - if osdProps.walPVC.ClaimName != "" { - containers = append(containers, c.generateEncryptionCopyBlockContainer(osdProps.resources, blockPVCMapperEncryptionWalInitContainer, osdProps.walPVC.ClaimName, mountPath, osdProps.pvc.ClaimName, bluestoreWalName, DmcryptWalType)) - } - - return containers -} - -// The reason why this is not part of getPVCInitContainer is that this will change the deployment spec object -// and thus restart the osd deployment, so it is better to have it separated and only enable it -// It will change the deployment spec because we must add a new argument to the method like 'mountPath' and use it in the container name -// otherwise we will end up with a new conflict during the job/deployment initialization -func (c *Cluster) getPVCMetadataInitContainer(mountPath string, osdProps osdProperties) v1.Container { - return v1.Container{ - Name: blockPVCMetadataMapperInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(blockDevMapper, fmt.Sprintf("/%s", osdProps.metadataPVC.ClaimName), fmt.Sprintf("/srv/%s", osdProps.metadataPVC.ClaimName)), - }, - VolumeDevices: []v1.VolumeDevice{ - { - Name: osdProps.metadataPVC.ClaimName, - DevicePath: fmt.Sprintf("/%s", osdProps.metadataPVC.ClaimName), - }, - }, - VolumeMounts: []v1.VolumeMount{ - { - MountPath: "/srv", - Name: fmt.Sprintf("%s-bridge", osdProps.metadataPVC.ClaimName), - }, - }, - SecurityContext: controller.PodSecurityContext(), - Resources: osdProps.resources, - } -} - -func (c *Cluster) getPVCMetadataInitContainerActivate(mountPath string, osdProps osdProperties) v1.Container { - cpDestinationName := path.Join(mountPath, bluestoreMetadataName) - // Encrypted is a special - // We have an initial "cp" to copy the pvc to an empty dir, typically we copy it in /var/lib/ceph/osd/ceph-0/block - // BUT we encryption we need a second block copy, the copy of the opened encrypted block which ultimately will be at /var/lib/ceph/osd/ceph-0/block - // So when encrypted we first copy to /var/lib/ceph/osd/ceph-0/block-tmp - // Then open the encrypted block and finally copy it to /var/lib/ceph/osd/ceph-0/block - // If we don't do this "cp" will fail to copy the special block file - if osdProps.encrypted { - cpDestinationName = encryptionBlockDestinationCopy(mountPath, bluestoreMetadataName) - } - - return v1.Container{ - Name: blockPVCMetadataMapperInitContainer, - Image: c.spec.CephVersion.Image, 
- Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(blockDevMapper, fmt.Sprintf("/%s", osdProps.metadataPVC.ClaimName), cpDestinationName), - }, - VolumeDevices: []v1.VolumeDevice{ - { - Name: osdProps.metadataPVC.ClaimName, - DevicePath: fmt.Sprintf("/%s", osdProps.metadataPVC.ClaimName), - }, - }, - // We need to call getPvcOSDBridgeMountActivate() so that we can copy the metadata block into the "main" empty dir - // This empty dir is passed along every init container - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)}, - SecurityContext: controller.PodSecurityContext(), - Resources: osdProps.resources, - } -} - -func (c *Cluster) getPVCWalInitContainer(mountPath string, osdProps osdProperties) v1.Container { - return v1.Container{ - Name: blockPVCWalMapperInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(blockDevMapper, fmt.Sprintf("/%s", osdProps.walPVC.ClaimName), fmt.Sprintf("/wal/%s", osdProps.walPVC.ClaimName)), - }, - VolumeDevices: []v1.VolumeDevice{ - { - Name: osdProps.walPVC.ClaimName, - DevicePath: fmt.Sprintf("/%s", osdProps.walPVC.ClaimName), - }, - }, - VolumeMounts: []v1.VolumeMount{ - { - MountPath: "/wal", - Name: fmt.Sprintf("%s-bridge", osdProps.walPVC.ClaimName), - }, - }, - SecurityContext: controller.PodSecurityContext(), - Resources: osdProps.resources, - } -} - -func (c *Cluster) getPVCWalInitContainerActivate(mountPath string, osdProps osdProperties) v1.Container { - cpDestinationName := path.Join(mountPath, bluestoreWalName) - // Encrypted is a special - // We have an initial "cp" to copy the pvc to an empty dir, typically we copy it in /var/lib/ceph/osd/ceph-0/block - // BUT we encryption we need a second block copy, the copy of the opened encrypted block which ultimately will be at /var/lib/ceph/osd/ceph-0/block - // So when encrypted we first copy to /var/lib/ceph/osd/ceph-0/block-tmp - // Then open the encrypted block and finally copy it to /var/lib/ceph/osd/ceph-0/block - // If we don't do this "cp" will fail to copy the special block file - if osdProps.encrypted { - cpDestinationName = encryptionBlockDestinationCopy(mountPath, bluestoreWalName) - } - - return v1.Container{ - Name: blockPVCWalMapperInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(blockDevMapper, fmt.Sprintf("/%s", osdProps.walPVC.ClaimName), cpDestinationName), - }, - VolumeDevices: []v1.VolumeDevice{ - { - Name: osdProps.walPVC.ClaimName, - DevicePath: fmt.Sprintf("/%s", osdProps.walPVC.ClaimName), - }, - }, - // We need to call getPvcOSDBridgeMountActivate() so that we can copy the wal block into the "main" empty dir - // This empty dir is passed along every init container - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)}, - SecurityContext: controller.PodSecurityContext(), - Resources: osdProps.resources, - } -} - -func (c *Cluster) getActivatePVCInitContainer(osdProps osdProperties, osdID string) v1.Container { - osdDataPath := activateOSDMountPath + osdID - osdDataBlockPath := path.Join(osdDataPath, "block") - - container := v1.Container{ - Name: activatePVCOSDInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "ceph-bluestore-tool", - }, - Args: []string{"prime-osd-dir", "--dev", osdDataBlockPath, "--path", osdDataPath, "--no-mon-config"}, - VolumeDevices: []v1.VolumeDevice{ - { - Name: osdProps.pvc.ClaimName, - DevicePath: osdDataBlockPath, - }, - 
}, - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(osdDataPath, osdProps.pvc.ClaimName)}, - SecurityContext: PrivilegedContext(), - Resources: osdProps.resources, - } - - return container -} - -func (c *Cluster) getExpandPVCInitContainer(osdProps osdProperties, osdID string) v1.Container { - /* Output example from 10GiB to 20GiB: - - inferring bluefs devices from bluestore path - 1 : device size 0x4ffe00000 : own 0x[11ff00000~40000000] = 0x40000000 : using 0x470000(4.4 MiB) : bluestore has 0x23fdd0000(9.0 GiB) available - Expanding DB/WAL... - Expanding Main... - 1 : expanding from 0x27fe00000 to 0x4ffe00000 - 1 : size label updated to 21472739328 - - */ - osdDataPath := activateOSDMountPath + osdID - - return v1.Container{ - Name: expandPVCOSDInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "ceph-bluestore-tool", - }, - Args: []string{"bluefs-bdev-expand", "--path", osdDataPath}, - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(osdDataPath, osdProps.pvc.ClaimName)}, - SecurityContext: PrivilegedContext(), - Resources: osdProps.resources, - } -} - -func (c *Cluster) getExpandEncryptedPVCInitContainer(mountPath string, osdProps osdProperties) v1.Container { - /* Command example - [root@rook-ceph-osd-0-59b9947547-w8mdq /]# cryptsetup resize set1-data-2-8n462-block-dmcrypt - Command successful. - */ - - // Add /dev/mapper in the volume mount list - // This will fix issues when running on multi-path, where cryptsetup complains that the underlying device does not exist - // Essentially, the device cannot be found because it was not mounted in the container - // Typically, the device is mapped to the OSD data dir so it is mounted - volMount := []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)} - _, volMountMapper := getDeviceMapperVolume() - volMount = append(volMount, volMountMapper) - - return v1.Container{ - Name: expandEncryptedPVCOSDInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "cryptsetup", - }, - Args: []string{"--verbose", "resize", encryptionDMName(osdProps.pvc.ClaimName, DmcryptBlockType)}, - VolumeMounts: volMount, - SecurityContext: PrivilegedContext(), - Resources: osdProps.resources, - } -} - -func (c *Cluster) getEncryptedStatusPVCInitContainer(mountPath string, osdProps osdProperties) v1.Container { - /* Command example: - root@rook-ceph-osd-0-59b9947547-w8mdq /]# cryptsetup status set1-data-2-8n462-block-dmcrypt -v - /dev/mapper/set1-data-2-8n462-block-dmcrypt is active and is in use. - type: LUKS1 - cipher: aes-xts-plain64 - keysize: 256 bits - key location: dm-crypt - device: /dev/xvdbv - sector size: 512 - offset: 4096 sectors - size: 20967424 sectors - mode: read/write - flags: discards - Command successful. - */ - - return v1.Container{ - Name: encryptedPVCStatusOSDInitContainer, - Image: c.spec.CephVersion.Image, - Command: []string{ - "cryptsetup", - }, - Args: []string{"--verbose", "status", encryptionDMName(osdProps.pvc.ClaimName, DmcryptBlockType)}, - VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)}, - SecurityContext: PrivilegedContext(), - Resources: osdProps.resources, - } -} diff --git a/pkg/operator/ceph/cluster/osd/spec_test.go b/pkg/operator/ceph/cluster/osd/spec_test.go deleted file mode 100644 index 030668514..000000000 --- a/pkg/operator/ceph/cluster/osd/spec_test.go +++ /dev/null @@ -1,871 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package osd for the Ceph OSDs. -package osd - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - operatortest "github.com/rook/rook/pkg/operator/ceph/test" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/client-go/kubernetes/fake" -) - -func TestPodContainer(t *testing.T) { - cluster := &Cluster{rookVersion: "23", clusterInfo: cephclient.AdminClusterInfo("myosd")} - cluster.clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - osdProps := osdProperties{ - crushHostname: "node", - devices: []cephv1.Device{}, - resources: v1.ResourceRequirements{}, - storeConfig: config.StoreConfig{}, - schedulerName: "custom-scheduler", - } - dataPathMap := &provisionConfig{ - DataPathMap: opconfig.NewDatalessDaemonDataPathMap(cluster.clusterInfo.Namespace, "/var/lib/rook"), - } - c, err := cluster.provisionPodTemplateSpec(osdProps, v1.RestartPolicyAlways, dataPathMap) - assert.NotNil(t, c) - assert.Nil(t, err) - assert.Equal(t, 1, len(c.Spec.InitContainers)) - assert.Equal(t, 1, len(c.Spec.Containers)) - assert.Equal(t, "custom-scheduler", c.Spec.SchedulerName) - container := c.Spec.InitContainers[0] - logger.Infof("container: %+v", container) - assert.Equal(t, "copy-binaries", container.Args[0]) - container = c.Spec.Containers[0] - assert.Equal(t, "/rook/tini", container.Command[0]) - assert.Equal(t, "--", container.Args[0]) - assert.Equal(t, "/rook/rook", container.Args[1]) - assert.Equal(t, "ceph", container.Args[2]) - assert.Equal(t, "osd", container.Args[3]) - assert.Equal(t, "provision", container.Args[4]) - - for _, c := range c.Spec.Containers { - vars := operatortest.FindDuplicateEnvVars(c) - assert.Equal(t, 0, len(vars)) - } -} - -func TestDaemonset(t *testing.T) { - testPodDevices(t, "", "sda", true) - testPodDevices(t, "/var/lib/mydatadir", "sdb", false) - testPodDevices(t, "", "", true) - testPodDevices(t, "", "", false) -} - -func testPodDevices(t *testing.T, dataDir, deviceName string, allDevices bool) { - devices := []cephv1.Device{ - {Name: deviceName}, - } - - clientset := fake.NewSimpleClientset() - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "ns", - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("test") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}} - spec := cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{Image: 
"quay.io/ceph/ceph:v15"}, - Storage: cephv1.StorageScopeSpec{ - Selection: cephv1.Selection{UseAllDevices: &allDevices, DeviceFilter: deviceName}, - Nodes: []cephv1.Node{{Name: "node1"}}, - }, - PriorityClassNames: map[rook.KeyType]string{ - cephv1.KeyOSD: "my-priority-class", - }, - Annotations: cephv1.AnnotationsSpec{ - "osd": map[string]string{ - "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES": "134217728", - }, - }, - } - c := New(context, clusterInfo, spec, "rook/rook:myversion") - - devMountNeeded := deviceName != "" || allDevices - - n := c.spec.Storage.ResolveNode(spec.Storage.Nodes[0].Name) - if len(devices) == 0 && len(dataDir) == 0 { - return - } - osd := OSDInfo{ - ID: 0, - CVMode: "raw", - } - - osdProp := osdProperties{ - crushHostname: n.Name, - selection: n.Selection, - resources: v1.ResourceRequirements{}, - storeConfig: config.StoreConfig{}, - schedulerName: "custom-scheduler", - } - - dataPathMap := &provisionConfig{ - DataPathMap: opconfig.NewDatalessDaemonDataPathMap(c.clusterInfo.Namespace, "/var/lib/rook"), - } - - // Test LVM based on OSD on bare metal - deployment, err := c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, "rook-ceph-osd-0", deployment.Name) - assert.Equal(t, c.clusterInfo.Namespace, deployment.Namespace) - assert.Equal(t, serviceAccountName, deployment.Spec.Template.Spec.ServiceAccountName) - assert.Equal(t, int32(1), *(deployment.Spec.Replicas)) - assert.Equal(t, "node1", deployment.Spec.Template.Spec.NodeSelector[v1.LabelHostname]) - assert.Equal(t, v1.RestartPolicyAlways, deployment.Spec.Template.Spec.RestartPolicy) - assert.Equal(t, "my-priority-class", deployment.Spec.Template.Spec.PriorityClassName) - if devMountNeeded && len(dataDir) > 0 { - assert.Equal(t, 8, len(deployment.Spec.Template.Spec.Volumes)) - } - if devMountNeeded && len(dataDir) == 0 { - assert.Equal(t, 8, len(deployment.Spec.Template.Spec.Volumes)) - } - if !devMountNeeded && len(dataDir) > 0 { - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Volumes)) - } - assert.Equal(t, "custom-scheduler", deployment.Spec.Template.Spec.SchedulerName) - - assert.Equal(t, "rook-data", deployment.Spec.Template.Spec.Volumes[0].Name) - - assert.Equal(t, AppName, deployment.Spec.Template.ObjectMeta.Name) - assert.Equal(t, AppName, deployment.Spec.Template.ObjectMeta.Labels["app"]) - assert.Equal(t, c.clusterInfo.Namespace, deployment.Spec.Template.ObjectMeta.Labels["rook_cluster"]) - assert.Equal(t, 1, len(deployment.Spec.Template.ObjectMeta.Annotations)) - - assert.Equal(t, 2, len(deployment.Spec.Template.Spec.InitContainers)) - initCont := deployment.Spec.Template.Spec.InitContainers[0] - assert.Equal(t, "quay.io/ceph/ceph:v15", initCont.Image) - assert.Equal(t, "activate", initCont.Name) - assert.Equal(t, 4, len(initCont.VolumeMounts)) - - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - cont := deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, spec.CephVersion.Image, cont.Image) - assert.Equal(t, 7, len(cont.VolumeMounts)) - assert.Equal(t, "ceph-osd", cont.Command[0]) - verifyEnvVar(t, cont.Env, "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES", "134217728", true) - - // Test OSD on PVC with LVM - osdProp = osdProperties{ - crushHostname: n.Name, - selection: n.Selection, - resources: v1.ResourceRequirements{}, - storeConfig: config.StoreConfig{}, - pvc: v1.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}, - } - // Not needed when running on PVC - osd = OSDInfo{ - ID: 0, - CVMode: "lvm", - } - - 
deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 4, len(deployment.Spec.Template.Spec.InitContainers), deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "config-init", deployment.Spec.Template.Spec.InitContainers[0].Name) - assert.Equal(t, "copy-bins", deployment.Spec.Template.Spec.InitContainers[1].Name) - assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[3].Name) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - initCont = deployment.Spec.Template.Spec.InitContainers[0] - assert.Equal(t, 4, len(initCont.VolumeMounts), initCont.VolumeMounts) - blkInitCont := deployment.Spec.Template.Spec.InitContainers[2] - assert.Equal(t, 1, len(blkInitCont.VolumeDevices)) - cont = deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, 8, len(cont.VolumeMounts), cont.VolumeMounts) - - // Test OSD on PVC with RAW - osd = OSDInfo{ - ID: 0, - CVMode: "raw", - } - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 4, len(deployment.Spec.Template.Spec.InitContainers), deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[0].Name) - assert.Equal(t, "activate", deployment.Spec.Template.Spec.InitContainers[1].Name) - assert.Equal(t, "expand-bluefs", deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[3].Name) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - cont = deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, 6, len(cont.VolumeMounts), cont.VolumeMounts) - - // Test with encrypted OSD on PVC with RAW - osdProp.encrypted = true - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 8, len(deployment.Spec.Template.Spec.InitContainers), deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[0].Name) - assert.Equal(t, "encryption-open", deployment.Spec.Template.Spec.InitContainers[1].Name) - assert.Equal(t, "blkdevmapper-encryption", deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "encrypted-block-status", deployment.Spec.Template.Spec.InitContainers[3].Name) - assert.Equal(t, "expand-encrypted-bluefs", deployment.Spec.Template.Spec.InitContainers[4].Name) - assert.Equal(t, 2, len(deployment.Spec.Template.Spec.InitContainers[4].VolumeMounts), deployment.Spec.Template.Spec.InitContainers[4].VolumeMounts) - assert.Equal(t, "dev-mapper", deployment.Spec.Template.Spec.InitContainers[4].VolumeMounts[1].Name, deployment.Spec.Template.Spec.InitContainers[4].VolumeMounts) - assert.Equal(t, "activate", deployment.Spec.Template.Spec.InitContainers[5].Name) - assert.Equal(t, "expand-bluefs", deployment.Spec.Template.Spec.InitContainers[6].Name) - assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[7].Name) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - cont = deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, 7, len(cont.VolumeMounts), cont.VolumeMounts) - osdProp.encrypted = false - assert.Equal(t, 9, 
len(deployment.Spec.Template.Spec.Volumes), deployment.Spec.Template.Spec.Volumes) - - // // Test OSD on PVC with RAW and metadata device - osd = OSDInfo{ - ID: 0, - CVMode: "raw", - } - osdProp.metadataPVC = v1.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc-metadata"} - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 5, len(deployment.Spec.Template.Spec.InitContainers)) - assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[0].Name) - assert.Equal(t, "blkdevmapper-metadata", deployment.Spec.Template.Spec.InitContainers[1].Name) - assert.Equal(t, "activate", deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "expand-bluefs", deployment.Spec.Template.Spec.InitContainers[3].Name) - assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[4].Name) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - cont = deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, 6, len(cont.VolumeMounts), cont.VolumeMounts) - blkInitCont = deployment.Spec.Template.Spec.InitContainers[1] - assert.Equal(t, 1, len(blkInitCont.VolumeDevices)) - blkMetaInitCont := deployment.Spec.Template.Spec.InitContainers[2] - assert.Equal(t, 1, len(blkMetaInitCont.VolumeDevices)) - assert.Equal(t, 9, len(deployment.Spec.Template.Spec.Volumes), deployment.Spec.Template.Spec.Volumes) - - // // Test encrypted OSD on PVC with RAW and metadata device - osd = OSDInfo{ - ID: 0, - CVMode: "raw", - } - osdProp.encrypted = true - osdProp.metadataPVC = v1.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc-metadata"} - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 11, len(deployment.Spec.Template.Spec.InitContainers)) - assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[0].Name) - assert.Equal(t, "blkdevmapper-metadata", deployment.Spec.Template.Spec.InitContainers[1].Name) - assert.Equal(t, "encryption-open", deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "encryption-open-metadata", deployment.Spec.Template.Spec.InitContainers[3].Name) - assert.Equal(t, "blkdevmapper-encryption", deployment.Spec.Template.Spec.InitContainers[4].Name) - assert.Equal(t, "blkdevmapper-metadata-encryption", deployment.Spec.Template.Spec.InitContainers[5].Name) - assert.Equal(t, "encrypted-block-status", deployment.Spec.Template.Spec.InitContainers[6].Name) - assert.Equal(t, "expand-encrypted-bluefs", deployment.Spec.Template.Spec.InitContainers[7].Name) - assert.Equal(t, "activate", deployment.Spec.Template.Spec.InitContainers[8].Name) - assert.Equal(t, "expand-bluefs", deployment.Spec.Template.Spec.InitContainers[9].Name) - assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[10].Name) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - cont = deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, 7, len(cont.VolumeMounts), cont.VolumeMounts) - blkInitCont = deployment.Spec.Template.Spec.InitContainers[1] - assert.Equal(t, 1, len(blkInitCont.VolumeDevices)) - blkMetaInitCont = deployment.Spec.Template.Spec.InitContainers[8] - assert.Equal(t, 1, len(blkMetaInitCont.VolumeDevices)) - osdProp.encrypted = false - assert.Equal(t, 11, len(deployment.Spec.Template.Spec.Volumes), deployment.Spec.Template.Spec.Volumes) - - // // Test OSD on PVC with RAW / metadata and wal device - osd = 
OSDInfo{ - ID: 0, - CVMode: "raw", - } - osdProp.metadataPVC = v1.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc-metadata"} - osdProp.walPVC = v1.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc-wal"} - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 6, len(deployment.Spec.Template.Spec.InitContainers)) - assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[0].Name) - assert.Equal(t, "blkdevmapper-metadata", deployment.Spec.Template.Spec.InitContainers[1].Name) - assert.Equal(t, "blkdevmapper-wal", deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "activate", deployment.Spec.Template.Spec.InitContainers[3].Name) - assert.Equal(t, "expand-bluefs", deployment.Spec.Template.Spec.InitContainers[4].Name) - assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[5].Name) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - cont = deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, 6, len(cont.VolumeMounts), cont.VolumeMounts) - blkInitCont = deployment.Spec.Template.Spec.InitContainers[1] - assert.Equal(t, 1, len(blkInitCont.VolumeDevices)) - blkMetaInitCont = deployment.Spec.Template.Spec.InitContainers[2] - assert.Equal(t, 1, len(blkMetaInitCont.VolumeDevices)) - assert.Equal(t, 11, len(deployment.Spec.Template.Spec.Volumes), deployment.Spec.Template.Spec.Volumes) - - // // Test encrypted OSD on PVC with RAW / metadata and wal device - osd = OSDInfo{ - ID: 0, - CVMode: "raw", - } - osdProp.encrypted = true - osdProp.metadataPVC = v1.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc-metadata"} - osdProp.walPVC = v1.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc-wal"} - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 14, len(deployment.Spec.Template.Spec.InitContainers)) - assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[0].Name) - assert.Equal(t, "blkdevmapper-metadata", deployment.Spec.Template.Spec.InitContainers[1].Name) - assert.Equal(t, "blkdevmapper-wal", deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "encryption-open", deployment.Spec.Template.Spec.InitContainers[3].Name) - assert.Equal(t, "encryption-open-metadata", deployment.Spec.Template.Spec.InitContainers[4].Name) - assert.Equal(t, "encryption-open-wal", deployment.Spec.Template.Spec.InitContainers[5].Name) - assert.Equal(t, "blkdevmapper-encryption", deployment.Spec.Template.Spec.InitContainers[6].Name) - assert.Equal(t, "blkdevmapper-metadata-encryption", deployment.Spec.Template.Spec.InitContainers[7].Name) - assert.Equal(t, "blkdevmapper-wal-encryption", deployment.Spec.Template.Spec.InitContainers[8].Name) - assert.Equal(t, "encrypted-block-status", deployment.Spec.Template.Spec.InitContainers[9].Name) - assert.Equal(t, "expand-encrypted-bluefs", deployment.Spec.Template.Spec.InitContainers[10].Name) - assert.Equal(t, "activate", deployment.Spec.Template.Spec.InitContainers[11].Name) - assert.Equal(t, "expand-bluefs", deployment.Spec.Template.Spec.InitContainers[12].Name) - assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[13].Name) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - cont = deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, 7, len(cont.VolumeMounts), cont.VolumeMounts) - blkInitCont = 
deployment.Spec.Template.Spec.InitContainers[1] - assert.Equal(t, 1, len(blkInitCont.VolumeDevices)) - blkMetaInitCont = deployment.Spec.Template.Spec.InitContainers[11] - assert.Equal(t, 1, len(blkMetaInitCont.VolumeDevices)) - assert.Equal(t, 13, len(deployment.Spec.Template.Spec.Volumes), deployment.Spec.Template.Spec.Volumes) - - // Test with encrypted OSD on PVC with RAW with KMS - osdProp.encrypted = true - osdProp.metadataPVC = v1.PersistentVolumeClaimVolumeSource{} - osdProp.walPVC = v1.PersistentVolumeClaimVolumeSource{} - c.spec.Security.KeyManagementService.ConnectionDetails = map[string]string{"KMS_PROVIDER": "vault"} - c.spec.Security.KeyManagementService.TokenSecretName = "vault-token" - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 9, len(deployment.Spec.Template.Spec.InitContainers), deployment.Spec.Template.Spec.InitContainers) - assert.Equal(t, "blkdevmapper", deployment.Spec.Template.Spec.InitContainers[0].Name) - assert.Equal(t, "encryption-kms-get-kek", deployment.Spec.Template.Spec.InitContainers[1].Name) - assert.Equal(t, "encryption-open", deployment.Spec.Template.Spec.InitContainers[2].Name) - assert.Equal(t, "blkdevmapper-encryption", deployment.Spec.Template.Spec.InitContainers[3].Name) - assert.Equal(t, "encrypted-block-status", deployment.Spec.Template.Spec.InitContainers[4].Name) - assert.Equal(t, "expand-encrypted-bluefs", deployment.Spec.Template.Spec.InitContainers[5].Name) - assert.Equal(t, "activate", deployment.Spec.Template.Spec.InitContainers[6].Name) - assert.Equal(t, "expand-bluefs", deployment.Spec.Template.Spec.InitContainers[7].Name) - assert.Equal(t, "chown-container-data-dir", deployment.Spec.Template.Spec.InitContainers[8].Name) - assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers)) - cont = deployment.Spec.Template.Spec.Containers[0] - assert.Equal(t, 7, len(cont.VolumeMounts), cont.VolumeMounts) - assert.Equal(t, 9, len(deployment.Spec.Template.Spec.Volumes), deployment.Spec.Template.Spec.Volumes) // One more than the encryption with k8s for the kek get init container - - // Test with encrypted OSD on PVC with RAW with KMS with TLS - osdProp.encrypted = true - osdProp.metadataPVC = v1.PersistentVolumeClaimVolumeSource{} - osdProp.walPVC = v1.PersistentVolumeClaimVolumeSource{} - c.spec.Security.KeyManagementService.ConnectionDetails = map[string]string{"KMS_PROVIDER": "vault", "VAULT_CACERT": "vault-ca-cert-secret", "VAULT_CLIENT_CERT": "vault-client-cert-secret", "VAULT_CLIENT_KEY": "vault-client-key-secret"} - c.spec.Security.KeyManagementService.TokenSecretName = "vault-token" - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.Equal(t, 10, len(deployment.Spec.Template.Spec.Volumes), deployment.Spec.Template.Spec.Volumes) // One more than the encryption with k8s for the kek get init container - assert.Equal(t, 3, len(deployment.Spec.Template.Spec.Volumes[7].VolumeSource.Projected.Sources), deployment.Spec.Template.Spec.Volumes[0]) // 3 more since we have the tls secrets - osdProp.encrypted = false - - // Test tune Fast settings when OSD on PVC - osdProp.tuneFastDeviceClass = true - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.NoError(t, err) - for _, flag := range defaultTuneFastSettings { - assert.Contains(t, deployment.Spec.Template.Spec.Containers[0].Args, flag) - } - - // Test tune Slow settings when OSD on PVC - osdProp.tuneSlowDeviceClass 
= true - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.NoError(t, err) - for _, flag := range defaultTuneSlowSettings { - assert.Contains(t, deployment.Spec.Template.Spec.Containers[0].Args, flag) - } - - // Test shareProcessNamespace presence - assert.True(t, deployment.Spec.Template.Spec.HostPID) - if deployment.Spec.Template.Spec.ShareProcessNamespace != nil { - panic("ShareProcessNamespace should be nil") - } - - // Turn on log collector - c.spec.LogCollector.Enabled = true - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.NoError(t, err) - assert.True(t, deployment.Spec.Template.Spec.HostPID, deployment.Spec.Template.Spec.HostPID) - if deployment.Spec.Template.Spec.ShareProcessNamespace != nil { - panic("ShareProcessNamespace should be nil") - } - - // Test hostPID and ShareProcessNamespace - { - // now set ceph version to nautilus - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "ns", - CephVersion: cephver.Octopus, - } - clusterInfo.SetName("test") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - c := New(context, clusterInfo, spec, "rook/rook:myversion") - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.NoError(t, err) - assert.False(t, deployment.Spec.Template.Spec.HostPID, deployment.Spec.Template.Spec.HostPID) - - // Turn on log collector - c.spec.LogCollector.Enabled = true - deployment, err = c.makeDeployment(osdProp, osd, dataPathMap) - assert.NoError(t, err) - shareProcessNamespace := *deployment.Spec.Template.Spec.ShareProcessNamespace - assert.True(t, shareProcessNamespace) - } - -} - -func verifyEnvVar(t *testing.T, envVars []v1.EnvVar, expectedName, expectedValue string, expectedFound bool) { - found := false - for _, envVar := range envVars { - if envVar.Name == expectedName { - assert.Equal(t, expectedValue, envVar.Value) - found = true - break - } - } - - assert.Equal(t, expectedFound, found) -} - -func TestStorageSpecConfig(t *testing.T) { - clientset := fake.NewSimpleClientset() - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "ns", - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("testing") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}} - spec := cephv1.ClusterSpec{ - DataDirHostPath: context.ConfigDir, - Storage: cephv1.StorageScopeSpec{ - Config: map[string]string{ - "crushRoot": "custom-root", - }, - Nodes: []cephv1.Node{ - { - Name: "node1", - Config: map[string]string{ - "databaseSizeMB": "10", - "walSizeMB": "20", - "metadataDevice": "nvme093", - }, - Selection: cephv1.Selection{}, - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(1024.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(4096.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(500.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(2048.0, resource.BinarySI), - }, - }, - }, - }, - }, - } - - c := New(context, clusterInfo, spec, "rook/rook:myversion") - n := c.spec.Storage.ResolveNode(spec.Storage.Nodes[0].Name) - storeConfig := config.ToStoreConfig(spec.Storage.Nodes[0].Config) - metadataDevice := config.MetadataDevice(spec.Storage.Nodes[0].Config) - - osdProp := osdProperties{ - crushHostname: n.Name, - devices: n.Devices, - selection: n.Selection, - resources: c.spec.Storage.Nodes[0].Resources, - storeConfig: storeConfig, - 
metadataDevice: metadataDevice, - } - - dataPathMap := &provisionConfig{ - DataPathMap: opconfig.NewDatalessDaemonDataPathMap(c.clusterInfo.Namespace, "/var/lib/rook"), - } - - job, err := c.makeJob(osdProp, dataPathMap) - assert.NotNil(t, job) - assert.Nil(t, err) - assert.Equal(t, "rook-ceph-osd-prepare-node1", job.ObjectMeta.Name) - container := job.Spec.Template.Spec.InitContainers[0] - assert.NotNil(t, container) - container = job.Spec.Template.Spec.Containers[0] - assert.NotNil(t, container) - verifyEnvVar(t, container.Env, "ROOK_OSD_DATABASE_SIZE", "10", true) - verifyEnvVar(t, container.Env, "ROOK_OSD_WAL_SIZE", "20", true) - verifyEnvVar(t, container.Env, "ROOK_METADATA_DEVICE", "nvme093", true) - verifyEnvVar(t, container.Env, CrushRootVarName, "custom-root", true) -} - -func TestHostNetwork(t *testing.T) { - storageSpec := cephv1.StorageScopeSpec{ - Nodes: []cephv1.Node{ - { - Name: "node1", - Config: map[string]string{ - "databaseSizeMB": "10", - "walSizeMB": "20", - }, - }, - }, - } - - clientset := fake.NewSimpleClientset() - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "ns", - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("test") - - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}} - spec := cephv1.ClusterSpec{ - Storage: storageSpec, - Network: cephv1.NetworkSpec{HostNetwork: true}, - } - c := New(context, clusterInfo, spec, "rook/rook:myversion") - - n := c.spec.Storage.ResolveNode(storageSpec.Nodes[0].Name) - osd := OSDInfo{ - ID: 0, - CVMode: "raw", - } - - osdProp := osdProperties{ - crushHostname: n.Name, - devices: n.Devices, - selection: n.Selection, - resources: c.spec.Storage.Nodes[0].Resources, - storeConfig: config.StoreConfig{}, - } - - dataPathMap := &provisionConfig{ - DataPathMap: opconfig.NewDatalessDaemonDataPathMap(c.clusterInfo.Namespace, "/var/lib/rook"), - } - - r, err := c.makeDeployment(osdProp, osd, dataPathMap) - assert.NotNil(t, r) - assert.Nil(t, err) - - assert.Equal(t, "rook-ceph-osd-0", r.ObjectMeta.Name) - assert.Equal(t, true, r.Spec.Template.Spec.HostNetwork) - assert.Equal(t, v1.DNSClusterFirstWithHostNet, r.Spec.Template.Spec.DNSPolicy) -} - -func TestOsdPrepareResources(t *testing.T) { - clientset := fake.NewSimpleClientset() - - context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}} - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns"} - clusterInfo.SetName("test") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - spec := cephv1.ClusterSpec{ - Resources: map[string]v1.ResourceRequirements{"prepareosd": { - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(2000.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(250.0, resource.BinarySI), - }, - }, - }, - } - c := New(context, clusterInfo, spec, "rook/rook:myversion") - - r := cephv1.GetPrepareOSDResources(c.spec.Resources) - assert.Equal(t, "2000", r.Limits.Cpu().String()) - assert.Equal(t, "0", r.Requests.Cpu().String()) - assert.Equal(t, "0", r.Limits.Memory().String()) - assert.Equal(t, "250", r.Requests.Memory().String()) -} - -func TestClusterGetPVCEncryptionOpenInitContainerActivate(t *testing.T) { - c := New(&clusterd.Context{}, &cephclient.ClusterInfo{OwnerInfo: &k8sutil.OwnerInfo{}}, cephv1.ClusterSpec{}, "rook/rook:myversion") - osdProperties := osdProperties{ - pvc: 
v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc1", - }, - } - mountPath := "/var/lib/ceph/osd/ceph-0" - - // No metadata PVC - containers := c.getPVCEncryptionOpenInitContainerActivate(mountPath, osdProperties) - assert.Equal(t, 1, len(containers)) - - // With metadata PVC - osdProperties.metadataPVC.ClaimName = "pvcDB" - containers = c.getPVCEncryptionOpenInitContainerActivate(mountPath, osdProperties) - assert.Equal(t, 2, len(containers)) - - // With wal PVC - osdProperties.walPVC.ClaimName = "pvcWal" - containers = c.getPVCEncryptionOpenInitContainerActivate(mountPath, osdProperties) - assert.Equal(t, 3, len(containers)) -} - -func TestClusterGetPVCEncryptionInitContainerActivate(t *testing.T) { - c := New(&clusterd.Context{}, &cephclient.ClusterInfo{OwnerInfo: &k8sutil.OwnerInfo{}}, cephv1.ClusterSpec{}, "rook/rook:myversion") - osdProperties := osdProperties{ - pvc: v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc1", - }, - resources: v1.ResourceRequirements{}, - } - mountPath := "/var/lib/ceph/osd/ceph-0" - - // No metadata PVC - containers := c.getPVCEncryptionInitContainerActivate(mountPath, osdProperties) - assert.Equal(t, 1, len(containers)) - - // With metadata PVC - osdProperties.metadataPVC.ClaimName = "pvcDB" - containers = c.getPVCEncryptionInitContainerActivate(mountPath, osdProperties) - assert.Equal(t, 2, len(containers)) - - // With wal PVC - osdProperties.walPVC.ClaimName = "pvcWal" - containers = c.getPVCEncryptionInitContainerActivate(mountPath, osdProperties) - assert.Equal(t, 3, len(containers)) -} - -// WARNING! modifies c.deviceSets -func getDummyDeploymentOnPVC(clientset *fake.Clientset, c *Cluster, pvcName string, osdID int) *appsv1.Deployment { - osd := OSDInfo{ - ID: osdID, - UUID: "some-uuid", - BlockPath: "/some/path", - CVMode: "raw", - } - c.deviceSets = append(c.deviceSets, deviceSet{ - Name: pvcName, - PVCSources: map[string]v1.PersistentVolumeClaimVolumeSource{ - bluestorePVCData: {ClaimName: pvcName}, - }, - Portable: true, - }) - config := c.newProvisionConfig() - d, err := deploymentOnPVC(c, osd, pvcName, config) - if err != nil { - panic(err) - } - return d -} - -// WARNING! 
modifies c.ValidStorage -func getDummyDeploymentOnNode(clientset *fake.Clientset, c *Cluster, nodeName string, osdID int) *appsv1.Deployment { - osd := OSDInfo{ - ID: osdID, - UUID: "some-uuid", - BlockPath: "/dev/vda", - CVMode: "raw", - } - c.ValidStorage.Nodes = append(c.ValidStorage.Nodes, cephv1.Node{Name: nodeName}) - config := c.newProvisionConfig() - d, err := deploymentOnNode(c, osd, nodeName, config) - if err != nil { - panic(err) - } - return d -} - -func TestOSDPlacement(t *testing.T) { - clientset := fake.NewSimpleClientset() - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "ns", - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("testing") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}} - - spec := cephv1.ClusterSpec{ - Placement: cephv1.PlacementSpec{ - "all": { - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{{ - Key: "role", - Operator: v1.NodeSelectorOpIn, - Values: []string{"storage-node1"}, - }}, - }, - }, - }, - }, - }, - "osd": { - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{{ - Key: "role", - Operator: v1.NodeSelectorOpIn, - Values: []string{"storage-node1"}, - }}, - }, - }, - }, - }, - }, - "prepareosd": { - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{{ - Key: "role", - Operator: v1.NodeSelectorOpIn, - Values: []string{"storage-node1"}, - }}, - }, - }, - }, - }, - }, - }, - Storage: cephv1.StorageScopeSpec{ - OnlyApplyOSDPlacement: false, - }, - } - - osdProps := osdProperties{ - pvc: v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc1", - }, - } - osdProps.placement = cephv1.Placement{NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "role", - Operator: v1.NodeSelectorOpIn, - Values: []string{"storage-node3"}, - }, - }, - }, - }, - }, - }, - } - - osdProps.preparePlacement = &cephv1.Placement{NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "role", - Operator: v1.NodeSelectorOpIn, - Values: []string{"storage-node3"}, - }, - }, - }, - }, - }, - }, - } - - c := New(context, clusterInfo, spec, "rook/rook:myversion") - osd := OSDInfo{ - ID: 0, - CVMode: "raw", - } - - dataPathMap := &provisionConfig{ - DataPathMap: opconfig.NewDatalessDaemonDataPathMap(c.clusterInfo.Namespace, "/var/lib/rook"), - } - - // For OSD daemon - // When OnlyApplyOSDPlacement false, in case of PVC - r, err := c.makeDeployment(osdProps, osd, dataPathMap) - assert.NoError(t, err) - assert.Equal(t, 2, len(r.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - - // For OSD-prepare job - job, err := c.makeJob(osdProps, dataPathMap) - assert.NoError(t, err) - assert.Equal(t, 2, 
len(job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - - // When OnlyApplyOSDPlacement true, in case of PVC - spec.Storage.OnlyApplyOSDPlacement = true - c = New(context, clusterInfo, spec, "rook/rook:myversion") - r, err = c.makeDeployment(osdProps, osd, dataPathMap) - assert.NoError(t, err) - assert.Equal(t, 1, len(r.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - - // For OSD-prepare job - job, err = c.makeJob(osdProps, dataPathMap) - assert.NoError(t, err) - assert.Equal(t, 1, len(job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - - // When OnlyApplyOSDPlacement false, in case of non-PVC - spec.Storage.OnlyApplyOSDPlacement = false - osdProps = osdProperties{} - c = New(context, clusterInfo, spec, "rook/rook:myversion") - r, err = c.makeDeployment(osdProps, osd, dataPathMap) - assert.NoError(t, err) - assert.Equal(t, 2, len(r.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - - // For OSD-prepare job - job, err = c.makeJob(osdProps, dataPathMap) - assert.NoError(t, err) - assert.Equal(t, 2, len(job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - - // When OnlyApplyOSDPlacement true, in case of non-PVC - spec.Storage.OnlyApplyOSDPlacement = true - c = New(context, clusterInfo, spec, "rook/rook:myversion") - r, err = c.makeDeployment(osdProps, osd, dataPathMap) - assert.NoError(t, err) - assert.Equal(t, 1, len(r.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - - // For OSD-prepare job - job, err = c.makeJob(osdProps, dataPathMap) - assert.NoError(t, err) - assert.Equal(t, 1, len(job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) -} diff --git a/pkg/operator/ceph/cluster/osd/status.go b/pkg/operator/ceph/cluster/osd/status.go deleted file mode 100644 index 9943c7d0b..000000000 --- a/pkg/operator/ceph/cluster/osd/status.go +++ /dev/null @@ -1,370 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package osd for the Ceph OSDs. -package osd - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8swatch "k8s.io/apimachinery/pkg/watch" -) - -const ( - // OrchestrationStatusStarting denotes the OSD provisioning is beginning. 
- OrchestrationStatusStarting = "starting" - // OrchestrationStatusOrchestrating denotes the OSD provisioning has begun and is running. - OrchestrationStatusOrchestrating = "orchestrating" - // OrchestrationStatusCompleted denotes the OSD provisioning has completed. This does not imply - // the provisioning completed successfully in whole or in part. - OrchestrationStatusCompleted = "completed" - // OrchestrationStatusFailed denotes the OSD provisioning has failed. - OrchestrationStatusFailed = "failed" - - orchestrationStatusMapName = "rook-ceph-osd-%s-status" - orchestrationStatusKey = "status" - provisioningLabelKey = "provisioning" - nodeLabelKey = "node" -) - -var ( - // time to wait before updating OSDs opportunistically while waiting for OSDs to finish provisioning - osdOpportunisticUpdateDuration = 100 * time.Millisecond - - // a ticker that ticks every minute to check progress - minuteTickerDuration = time.Minute -) - -type provisionConfig struct { - DataPathMap *config.DataPathMap // location to store data in OSD and OSD prepare containers -} - -func (c *Cluster) newProvisionConfig() *provisionConfig { - return &provisionConfig{ - DataPathMap: config.NewDatalessDaemonDataPathMap(c.clusterInfo.Namespace, c.spec.DataDirHostPath), - } -} - -// The provisionErrors struct can get passed around to provisioning code which can add errors to its -// internal list of errors. The errors will be reported at the end of provisioning. -type provisionErrors struct { - errors []error -} - -func newProvisionErrors() *provisionErrors { - return &provisionErrors{ - errors: []error{}, - } -} - -func (e *provisionErrors) addError(message string, args ...interface{}) { - logger.Errorf(message, args...) - e.errors = append(e.errors, errors.Errorf(message, args...)) -} - -func (e *provisionErrors) len() int { - return len(e.errors) -} - -func (e *provisionErrors) asMessages() string { - o := "" - for _, err := range e.errors { - o = fmt.Sprintf("%s\n%v", o, err) - } - return o -} - -// return name of status ConfigMap -func (c *Cluster) updateOSDStatus(node string, status OrchestrationStatus) string { - return UpdateNodeStatus(c.kv, node, status) -} - -func statusConfigMapLabels(node string) map[string]string { - return map[string]string{ - k8sutil.AppAttr: AppName, - orchestrationStatusKey: provisioningLabelKey, - nodeLabelKey: node, - } -} - -// UpdateNodeStatus updates the status ConfigMap for the OSD on the given node. It returns the name -// the ConfigMap used. -func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus) string { - labels := statusConfigMapLabels(node) - - // update the status map with the given status now - s, _ := json.Marshal(status) - cmName := statusConfigMapName(node) - if err := kv.SetValueWithLabels( - cmName, - orchestrationStatusKey, - string(s), - labels, - ); err != nil { - // log the error, but allow the orchestration to continue even if the status update failed - logger.Errorf("failed to set node %q status to %q for osd orchestration. %s", node, status.Status, status.Message) - } - return cmName -} - -func (c *Cluster) handleOrchestrationFailure(errors *provisionErrors, nodeName, message string, args ...interface{}) { - errors.addError(message, args...) 
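// The failure is recorded both in the accumulated provisioning errors and, via
// UpdateNodeStatus below, as JSON under the "status" key of the node's status
// ConfigMap. As a rough sketch of that round trip (using only names from this file):
//   s := OrchestrationStatus{Status: OrchestrationStatusFailed, Message: "..."}
//   raw, _ := json.Marshal(s)                                 // stored by UpdateNodeStatus
//   recovered := parseOrchestrationStatus(map[string]string{orchestrationStatusKey: string(raw)})
//   // recovered.Status == OrchestrationStatusFailed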
- status := OrchestrationStatus{Status: OrchestrationStatusFailed, Message: message} - UpdateNodeStatus(c.kv, nodeName, status) -} - -func parseOrchestrationStatus(data map[string]string) *OrchestrationStatus { - if data == nil { - return nil - } - - statusRaw, ok := data[orchestrationStatusKey] - if !ok { - return nil - } - - // we have status for this node, unmarshal it - var status OrchestrationStatus - if err := json.Unmarshal([]byte(statusRaw), &status); err != nil { - logger.Warningf("failed to unmarshal orchestration status. status: %s. %v", statusRaw, err) - return nil - } - - return &status -} - -// return errors from this function when OSD provisioning should stop and the reconcile be restarted -func (c *Cluster) updateAndCreateOSDs( - createConfig *createConfig, - updateConfig *updateConfig, - errs *provisionErrors, // add errors here -) error { - // tick every mintue to check-in on housekeeping stuff and report overall progress - minuteTicker := time.NewTicker(minuteTickerDuration) - defer minuteTicker.Stop() - - var err error - - doLoop := true - for doLoop { - doLoop, err = c.updateAndCreateOSDsLoop(createConfig, updateConfig, minuteTicker, errs) - if err != nil { - if !doLoop { - return err - } - logger.Errorf("%v", err) - } - } - - return nil -} - -func statusConfigMapSelector() string { - return fmt.Sprintf("%s=%s,%s=%s", - k8sutil.AppAttr, AppName, - orchestrationStatusKey, provisioningLabelKey, - ) -} - -func (c *Cluster) updateAndCreateOSDsLoop( - createConfig *createConfig, - updateConfig *updateConfig, - minuteTicker *time.Ticker, // pass in the minute ticker so that we always know when a minute passes - errs *provisionErrors, // add errors here -) (shouldRestart bool, err error) { - cmClient := c.context.Clientset.CoreV1().ConfigMaps(c.clusterInfo.Namespace) - ctx := context.TODO() - selector := statusConfigMapSelector() - - listOptions := metav1.ListOptions{ - LabelSelector: selector, - } - configMapList, err := cmClient.List(ctx, listOptions) - if err != nil { - return false, errors.Wrapf(err, "failed to list OSD provisioning status ConfigMaps") - } - - // Process the configmaps initially in case any are already in a processable state - for i := range configMapList.Items { - // reference index to prevent implicit memory aliasing error - c.createOSDsForStatusMap(&configMapList.Items[i], createConfig, errs) - } - - watchOptions := metav1.ListOptions{ - LabelSelector: selector, - Watch: true, - ResourceVersion: configMapList.ResourceVersion, - } - watcher, err := cmClient.Watch(ctx, watchOptions) - defer watcher.Stop() - if err != nil { - return false, errors.Wrapf(err, "failed to start watching OSD provisioning status ConfigMaps") - } - - // tick after a short time of waiting for new OSD provision status configmaps to change state - // in order to allow opportunistic deployment updates while we wait - updateTicker := time.NewTicker(osdOpportunisticUpdateDuration) - defer updateTicker.Stop() - - watchErrMsg := "failed during watch of OSD provisioning status ConfigMaps" - for { - if updateConfig.doneUpdating() && createConfig.doneCreating() { - break // loop - } - - // reset the update ticker (and drain the channel if necessary) to make sure we always - // wait a little bit for an OSD prepare result before opportunistically updating deployments - updateTicker.Reset(osdOpportunisticUpdateDuration) - if len(updateTicker.C) > 0 { - <-updateTicker.C - } - - select { - case event, ok := <-watcher.ResultChan(): - if !ok { - logger.Infof("restarting watcher for OSD 
provisioning status ConfigMaps. the watcher closed the channel") - return true, nil - } - - if !isAddOrModifyEvent(event.Type) { - // We don't want to process delete events when we delete configmaps after having - // processed them. We also don't want to process BOOKMARK or ERROR events. - logger.Debugf("not processing %s event for object %q", event.Type, eventObjectName(event)) - break // case - } - - configMap, ok := event.Object.(*corev1.ConfigMap) - if !ok { - logger.Errorf("recovering. %s. expected type ConfigMap but found %T", watchErrMsg, configMap) - break // case - } - - c.createOSDsForStatusMap(configMap, createConfig, errs) - - case <-updateTicker.C: - // do an update - updateConfig.updateExistingOSDs(errs) - - case <-minuteTicker.C: - // Check whether we need to cancel the orchestration - if err := controller.CheckForCancelledOrchestration(c.context); err != nil { - return false, err - } - // Log progress - c, cExp := createConfig.progress() - u, uExp := updateConfig.progress() - logger.Infof("waiting... %d of %d OSD prepare jobs have finished processing and %d of %d OSDs have been updated", c, cExp, u, uExp) - } - } - - return false, nil -} - -func isAddOrModifyEvent(t k8swatch.EventType) bool { - switch t { - case k8swatch.Added, k8swatch.Modified: - return true - default: - return false - } -} - -func eventObjectName(e k8swatch.Event) string { - objName := "[could not determine name]" - objMeta, _ := meta.Accessor(e.Object) - if objMeta != nil { - objName = objMeta.GetName() - } - return objName -} - -// Create OSD Deployments for OSDs reported by the prepare job status configmap. -// Do not create OSD deployments if a deployment already exists for a given OSD. -func (c *Cluster) createOSDsForStatusMap( - configMap *corev1.ConfigMap, - createConfig *createConfig, - errs *provisionErrors, // add errors here -) { - nodeOrPVCName, ok := configMap.Labels[nodeLabelKey] - if !ok { - logger.Warningf("missing node label on configmap %s", configMap.Name) - return - } - - status := parseOrchestrationStatus(configMap.Data) - if status == nil { - return - } - nodeOrPVC := "node" - if status.PvcBackedOSD { - nodeOrPVC = "PVC" - } - - logger.Infof("OSD orchestration status for %s %s is %q", nodeOrPVC, nodeOrPVCName, status.Status) - - if status.Status == OrchestrationStatusCompleted { - createConfig.createNewOSDsFromStatus(status, nodeOrPVCName, errs) - c.deleteStatusConfigMap(nodeOrPVCName) // remove the provisioning status configmap - return - } - - if status.Status == OrchestrationStatusFailed { - createConfig.doneWithStatus(nodeOrPVCName) - errs.addError("failed to provision OSD(s) on %s %s. %+v", nodeOrPVC, nodeOrPVCName, status) - c.deleteStatusConfigMap(nodeOrPVCName) // remove the provisioning status configmap - return - } -} - -func statusConfigMapName(nodeOrPVCName string) string { - return k8sutil.TruncateNodeName(orchestrationStatusMapName, nodeOrPVCName) -} - -func (c *Cluster) deleteStatusConfigMap(nodeOrPVCName string) { - if err := c.kv.ClearStore(statusConfigMapName(nodeOrPVCName)); err != nil { - logger.Errorf("failed to remove the status configmap %q. 
%v", statusConfigMapName(nodeOrPVCName), err) - } -} - -func (c *Cluster) deleteAllStatusConfigMaps() { - ctx := context.TODO() - listOpts := metav1.ListOptions{ - LabelSelector: statusConfigMapSelector(), - } - cmClientset := c.context.Clientset.CoreV1().ConfigMaps(c.clusterInfo.Namespace) - cms, err := cmClientset.List(ctx, listOpts) - if err != nil { - logger.Warningf("failed to clean up any dangling OSD prepare status configmaps. failed to list OSD prepare status configmaps. %v", err) - return - } - for _, cm := range cms.Items { - logger.Debugf("cleaning up dangling OSD prepare status configmap %q", cm.Name) - err := cmClientset.Delete(ctx, cm.Name, metav1.DeleteOptions{}) - if err != nil { - logger.Warningf("failed to clean up dangling OSD prepare status configmap %q. %v", cm.Name, err) - } - } -} diff --git a/pkg/operator/ceph/cluster/osd/status_test.go b/pkg/operator/ceph/cluster/osd/status_test.go deleted file mode 100644 index 3db934e91..000000000 --- a/pkg/operator/ceph/cluster/osd/status_test.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package osd - -import ( - "context" - "encoding/json" - "fmt" - "testing" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes/fake" -) - -func TestOrchestrationStatus(t *testing.T) { - ctx := context.TODO() - clientset := fake.NewSimpleClientset() - clusterInfo := &cephclient.ClusterInfo{ - Namespace: "ns", - CephVersion: cephver.Nautilus, - } - context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}} - spec := cephv1.ClusterSpec{} - c := New(context, clusterInfo, spec, "myversion") - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - kv := k8sutil.NewConfigMapKVStore(c.clusterInfo.Namespace, clientset, ownerInfo) - nodeName := "mynode" - cmName := fmt.Sprintf(orchestrationStatusMapName, nodeName) - - // status map should not exist yet - _, err := c.context.Clientset.CoreV1().ConfigMaps(c.clusterInfo.Namespace).Get(ctx, cmName, metav1.GetOptions{}) - assert.True(t, errors.IsNotFound(err)) - - // update the status map with some status - status := OrchestrationStatus{Status: OrchestrationStatusOrchestrating, Message: "doing work"} - UpdateNodeStatus(kv, nodeName, status) - - // retrieve the status and verify it - statusMap, err := c.context.Clientset.CoreV1().ConfigMaps(c.clusterInfo.Namespace).Get(ctx, cmName, metav1.GetOptions{}) - assert.Nil(t, err) - assert.NotNil(t, statusMap) - retrievedStatus := parseOrchestrationStatus(statusMap.Data) - 
assert.NotNil(t, retrievedStatus) - assert.Equal(t, status, *retrievedStatus) -} - -func mockNodeOrchestrationCompletion(c *Cluster, nodeName string, statusMapWatcher *watch.FakeWatcher) { - ctx := context.TODO() - // if no valid osd node, don't need to check its status, return immediately - if len(c.spec.Storage.Nodes) == 0 { - return - } - for { - // wait for the node's orchestration status to change to "starting" - cmName := statusConfigMapName(nodeName) - cm, err := c.context.Clientset.CoreV1().ConfigMaps(c.clusterInfo.Namespace).Get(ctx, cmName, metav1.GetOptions{}) - if err == nil { - status := parseOrchestrationStatus(cm.Data) - if status != nil && status.Status == OrchestrationStatusStarting { - // the node has started orchestration, simulate its completion now by performing 2 tasks: - // 1) update the config map manually (which doesn't trigger a watch event, see https://github.com/kubernetes/kubernetes/issues/54075#issuecomment-337298950) - status = &OrchestrationStatus{ - OSDs: []OSDInfo{ - { - ID: 1, - UUID: "000000-0000-00000001", - Cluster: "rook", - CVMode: "raw", - BlockPath: "/dev/some/path", - }, - }, - Status: OrchestrationStatusCompleted, - } - UpdateNodeStatus(c.kv, nodeName, *status) - - // 2) call modify on the fake watcher so a watch event will get triggered - s, _ := json.Marshal(status) - cm.Data[orchestrationStatusKey] = string(s) - statusMapWatcher.Modify(cm) - break - } else { - logger.Debugf("waiting for node %s orchestration to start. status: %+v", nodeName, *status) - } - } else { - logger.Warningf("failed to get node %s orchestration status, will try again: %+v", nodeName, err) - } - <-time.After(50 * time.Millisecond) - } -} - -func waitForOrchestrationCompletion(c *Cluster, nodeName string, startCompleted *bool) { - ctx := context.TODO() - for { - if *startCompleted { - break - } - cmName := statusConfigMapName(nodeName) - cm, err := c.context.Clientset.CoreV1().ConfigMaps(c.clusterInfo.Namespace).Get(ctx, cmName, metav1.GetOptions{}) - if err == nil { - status := parseOrchestrationStatus(cm.Data) - if status != nil { - logger.Debugf("start has not completed, status is %+v", status) - } - } - <-time.After(50 * time.Millisecond) - } -} diff --git a/pkg/operator/ceph/cluster/osd/topology.go b/pkg/operator/ceph/cluster/osd/topology.go deleted file mode 100644 index 81f7735cf..000000000 --- a/pkg/operator/ceph/cluster/osd/topology.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config provides methods for generating the Ceph config for a Ceph cluster and for -// producing a "ceph.conf" compatible file from the config as well as Ceph command line-compatible -// flags. -package osd - -import ( - "fmt" - - "github.com/rook/rook/pkg/daemon/ceph/client" - corev1 "k8s.io/api/core/v1" -) - -var ( - - // The labels that can be specified with the K8s labels such as topology.kubernetes.io/zone - // These are all at the top layers of the CRUSH map. 
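// For example, a node labeled with topology.kubernetes.io/zone=us-east-1a,
// topology.rook.io/rack=rack1 and kubernetes.io/hostname=node-a resolves (after
// CRUSH name normalization) to {"zone": "us-east-1a", "rack": "rack1", "host": "node-a"},
// with the topology affinity taken from the lowest non-host level found, here
// "topology.rook.io/rack=rack1".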
- KubernetesTopologyLabels = []string{"zone", "region"} - - // The node labels that are supported with the topology.rook.io prefix such as topology.rook.io/rack - // The labels are in order from lowest to highest in the CRUSH hierarchy - CRUSHTopologyLabels = []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"} - - // The list of supported failure domains in the CRUSH map, ordered from lowest to highest - CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...) -) - -const ( - topologyLabelPrefix = "topology.rook.io/" -) - -// ExtractTopologyFromLabels extracts rook topology from labels and returns a map from topology type to value -func ExtractOSDTopologyFromLabels(labels map[string]string) (map[string]string, string) { - topology, topologyAffinity := extractTopologyFromLabels(labels) - - // Ensure the topology names are normalized for CRUSH - for name, value := range topology { - topology[name] = client.NormalizeCrushName(value) - } - return topology, topologyAffinity -} - -// ExtractTopologyFromLabels extracts rook topology from labels and returns a map from topology type to value -func extractTopologyFromLabels(labels map[string]string) (map[string]string, string) { - topology := make(map[string]string) - - // The topology affinity for the osd is the lowest topology label found in the hierarchy, - // not including the host name - var topologyAffinity string - - // check for the region k8s topology label that was deprecated in 1.17 - const regionLabel = "region" - region, ok := labels[corev1.LabelZoneRegion] - if ok { - topology[regionLabel] = region - topologyAffinity = formatTopologyAffinity(corev1.LabelZoneRegion, region) - } - - // check for the region k8s topology label that is GA in 1.17. - region, ok = labels[corev1.LabelZoneRegionStable] - if ok { - topology[regionLabel] = region - topologyAffinity = formatTopologyAffinity(corev1.LabelZoneRegionStable, region) - } - - // check for the zone k8s topology label that was deprecated in 1.17 - const zoneLabel = "zone" - zone, ok := labels[corev1.LabelZoneFailureDomain] - if ok { - topology[zoneLabel] = zone - topologyAffinity = formatTopologyAffinity(corev1.LabelZoneFailureDomain, zone) - } - - // check for the zone k8s topology label that is GA in 1.17. - zone, ok = labels[corev1.LabelZoneFailureDomainStable] - if ok { - topology[zoneLabel] = zone - topologyAffinity = formatTopologyAffinity(corev1.LabelZoneFailureDomainStable, zone) - } - - // get host - host, ok := labels[corev1.LabelHostname] - if ok { - topology["host"] = host - } - - // get the labels for the CRUSH map hierarchy - // iterate in reverse order so that the last topology found will be the lowest level in the hierarchy - // for the topology affinity - for i := len(CRUSHTopologyLabels) - 1; i >= 0; i-- { - topologyID := CRUSHTopologyLabels[i] - label := topologyLabelPrefix + topologyID - if value, ok := labels[label]; ok { - topology[topologyID] = value - topologyAffinity = formatTopologyAffinity(label, value) - } - } - return topology, topologyAffinity -} - -func formatTopologyAffinity(label, value string) string { - return fmt.Sprintf("%s=%s", label, value) -} diff --git a/pkg/operator/ceph/cluster/osd/topology_test.go b/pkg/operator/ceph/cluster/osd/topology_test.go deleted file mode 100644 index 30b73db9c..000000000 --- a/pkg/operator/ceph/cluster/osd/topology_test.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config provides methods for generating the Ceph config for a Ceph cluster and for -// producing a "ceph.conf" compatible file from the config as well as Ceph command line-compatible -// flags. -package osd - -import ( - "testing" - - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" -) - -func TestOrderedCRUSHLabels(t *testing.T) { - assert.Equal(t, "host", CRUSHMapLevelsOrdered[0]) - assert.Equal(t, "chassis", CRUSHMapLevelsOrdered[1]) - assert.Equal(t, "rack", CRUSHMapLevelsOrdered[2]) - assert.Equal(t, "row", CRUSHMapLevelsOrdered[3]) - assert.Equal(t, "pdu", CRUSHMapLevelsOrdered[4]) - assert.Equal(t, "pod", CRUSHMapLevelsOrdered[5]) - assert.Equal(t, "room", CRUSHMapLevelsOrdered[6]) - assert.Equal(t, "datacenter", CRUSHMapLevelsOrdered[7]) - assert.Equal(t, "zone", CRUSHMapLevelsOrdered[8]) - assert.Equal(t, "region", CRUSHMapLevelsOrdered[9]) -} - -func TestCleanTopologyLabels(t *testing.T) { - // load all the expected labels - nodeLabels := map[string]string{ - corev1.LabelZoneRegionStable: "r.region", - corev1.LabelZoneFailureDomainStable: "z.zone", - "kubernetes.io/hostname": "host.name", - "topology.rook.io/rack": "r.rack", - "topology.rook.io/row": "r.row", - "topology.rook.io/datacenter": "d.datacenter", - } - topology, affinity := ExtractOSDTopologyFromLabels(nodeLabels) - assert.Equal(t, 6, len(topology)) - assert.Equal(t, "r-region", topology["region"]) - assert.Equal(t, "z-zone", topology["zone"]) - assert.Equal(t, "host-name", topology["host"]) - assert.Equal(t, "r-rack", topology["rack"]) - assert.Equal(t, "r-row", topology["row"]) - assert.Equal(t, "d-datacenter", topology["datacenter"]) - assert.Equal(t, "topology.rook.io/rack=r.rack", affinity) -} - -func TestTopologyLabels(t *testing.T) { - nodeLabels := map[string]string{} - topology, affinity := extractTopologyFromLabels(nodeLabels) - assert.Equal(t, 0, len(topology)) - assert.Equal(t, "", affinity) - - // invalid non-namespaced zone and region labels are simply ignored - nodeLabels = map[string]string{ - "region": "badregion", - "zone": "badzone", - } - topology, affinity = extractTopologyFromLabels(nodeLabels) - assert.Equal(t, 0, len(topology)) - assert.Equal(t, "", affinity) - - // invalid zone and region labels are simply ignored - nodeLabels = map[string]string{ - "topology.rook.io/region": "r1", - "topology.rook.io/zone": "z1", - } - topology, affinity = extractTopologyFromLabels(nodeLabels) - assert.Equal(t, 0, len(topology)) - assert.Equal(t, "", affinity) - - // load all the expected labels - nodeLabels = map[string]string{ - corev1.LabelZoneRegionStable: "r1", - corev1.LabelZoneFailureDomainStable: "z1", - "kubernetes.io/hostname": "myhost", - "topology.rook.io/rack": "rack1", - "topology.rook.io/row": "row1", - "topology.rook.io/datacenter": "d1", - } - topology, affinity = extractTopologyFromLabels(nodeLabels) - assert.Equal(t, 6, len(topology)) - assert.Equal(t, "r1", topology["region"]) - assert.Equal(t, "z1", topology["zone"]) - 
assert.Equal(t, "myhost", topology["host"]) - assert.Equal(t, "rack1", topology["rack"]) - assert.Equal(t, "row1", topology["row"]) - assert.Equal(t, "d1", topology["datacenter"]) - assert.Equal(t, "topology.rook.io/rack=rack1", affinity) - - // ensure deprecated k8s labels are loaded - nodeLabels = map[string]string{ - corev1.LabelZoneRegion: "r1", - corev1.LabelZoneFailureDomain: "z1", - } - topology, affinity = extractTopologyFromLabels(nodeLabels) - assert.Equal(t, 2, len(topology)) - assert.Equal(t, "r1", topology["region"]) - assert.Equal(t, "z1", topology["zone"]) - assert.Equal(t, "failure-domain.beta.kubernetes.io/zone=z1", affinity) - - // ensure deprecated k8s labels are overridden - nodeLabels = map[string]string{ - corev1.LabelZoneRegionStable: "r1", - corev1.LabelZoneFailureDomainStable: "z1", - corev1.LabelZoneRegion: "oldregion", - corev1.LabelZoneFailureDomain: "oldzone", - } - topology, affinity = extractTopologyFromLabels(nodeLabels) - assert.Equal(t, 2, len(topology)) - assert.Equal(t, "r1", topology["region"]) - assert.Equal(t, "z1", topology["zone"]) - assert.Equal(t, "topology.kubernetes.io/zone=z1", affinity) - - // invalid labels under topology.rook.io return an error - nodeLabels = map[string]string{ - "topology.rook.io/row/bad": "r1", - } - topology, affinity = extractTopologyFromLabels(nodeLabels) - assert.Equal(t, 0, len(topology)) - assert.Equal(t, "", affinity) -} diff --git a/pkg/operator/ceph/cluster/osd/update.go b/pkg/operator/ceph/cluster/osd/update.go deleted file mode 100644 index e5dcfdbdf..000000000 --- a/pkg/operator/ceph/cluster/osd/update.go +++ /dev/null @@ -1,349 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// THE LIBRARY PROVIDED BY THIS FILE IS NOT THREAD SAFE - -var ( - // allow unit tests to override these values - maxUpdatesInParallel = 20 - updateMultipleDeploymentsAndWaitFunc = k8sutil.UpdateMultipleDeploymentsAndWait - deploymentOnNodeFunc = deploymentOnNode - deploymentOnPVCFunc = deploymentOnPVC - shouldCheckOkToStopFunc = cephclient.OSDUpdateShouldCheckOkToStop -) - -type updateConfig struct { - cluster *Cluster - provisionConfig *provisionConfig - queue *updateQueue // these OSDs need updated - numUpdatesNeeded int // the number of OSDs that needed updating - deployments *existenceList // these OSDs have existing deployments -} - -func (c *Cluster) newUpdateConfig( - provisionConfig *provisionConfig, - queue *updateQueue, - deployments *existenceList, -) *updateConfig { - return &updateConfig{ - c, - provisionConfig, - queue, - queue.Len(), - deployments, - } -} - -func (c *updateConfig) progress() (completed, initial int) { - return (c.numUpdatesNeeded - c.queue.Len()), c.numUpdatesNeeded -} - -func (c *updateConfig) doneUpdating() bool { - return c.queue.Len() == 0 -} - -func (c *updateConfig) updateExistingOSDs(errs *provisionErrors) { - ctx := context.TODO() - - if c.doneUpdating() { - return // no more OSDs to update - } - osdIDQuery, _ := c.queue.Pop() - - var osdIDs []int - var err error - if !shouldCheckOkToStopFunc(c.cluster.context, c.cluster.clusterInfo) { - // If we should not check ok-to-stop, then only process one OSD at a time. There are likely - // less than 3 OSDs in the cluster or the cluster is on a single node. E.g., in CI :wink:. - osdIDs = []int{osdIDQuery} - } else { - osdIDs, err = cephclient.OSDOkToStop(c.cluster.context, c.cluster.clusterInfo, osdIDQuery, maxUpdatesInParallel) - if err != nil { - if c.cluster.spec.ContinueUpgradeAfterChecksEvenIfNotHealthy { - logger.Infof("OSD %d is not ok-to-stop but 'continueUpgradeAfterChecksEvenIfNotHealthy' is true, so continuing to update it", osdIDQuery) - osdIDs = []int{osdIDQuery} // make sure to update the queried OSD - } else { - logger.Infof("OSD %d is not ok-to-stop. will try updating it again later", osdIDQuery) - c.queue.Push(osdIDQuery) // push back onto queue to make sure we retry it later - return - } - } - } - - logger.Debugf("updating OSDs: %v", osdIDs) - - updatedDeployments := make([]*appsv1.Deployment, 0, len(osdIDs)) - listIDs := []string{} // use this to build the k8s api selector query - for _, osdID := range osdIDs { - if !c.deployments.Exists(osdID) { - logger.Debugf("not updating deployment for OSD %d that is newly created", osdID) - continue - } - - // osdIDQuery which has been popped off the queue but it does need to be updated - if osdID != osdIDQuery && !c.queue.Exists(osdID) { - logger.Debugf("not updating deployment for OSD %d that is not in the update queue. the OSD has already been updated", osdID) - continue - } - - depName := deploymentName(osdID) - dep, err := c.cluster.context.Clientset.AppsV1().Deployments(c.cluster.clusterInfo.Namespace).Get(ctx, depName, metav1.GetOptions{}) - if err != nil { - errs.addError("failed to update OSD %d. failed to find existing deployment %q. 
%v", osdID, depName, err) - continue - } - osdInfo, err := c.cluster.getOSDInfo(dep) - if err != nil { - errs.addError("failed to update OSD %d. failed to extract OSD info from existing deployment %q. %v", osdID, depName, err) - continue - } - - // backward compatibility for old deployments - if osdInfo.DeviceClass == "" { - deviceClassInfo, err := cephclient.OSDDeviceClasses(c.cluster.context, c.cluster.clusterInfo, []string{strconv.Itoa(osdID)}) - if err != nil { - logger.Errorf("failed to get device class for existing deployment %q. %v", depName, err) - } else { - osdInfo.DeviceClass = deviceClassInfo[0].DeviceClass - } - } - - nodeOrPVCName, err := getNodeOrPVCName(dep) - if err != nil { - errs.addError("%v", errors.Wrapf(err, "failed to update OSD %d", osdID)) - continue - } - - var updatedDep *appsv1.Deployment - if osdIsOnPVC(dep) { - logger.Infof("updating OSD %d on PVC %q", osdID, nodeOrPVCName) - updatedDep, err = deploymentOnPVCFunc(c.cluster, osdInfo, nodeOrPVCName, c.provisionConfig) - - message := fmt.Sprintf("Processing OSD %d on PVC %q", osdID, nodeOrPVCName) - updateConditionFunc(c.cluster.context, c.cluster.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message) - } else { - if !c.cluster.ValidStorage.NodeExists(nodeOrPVCName) { - // node will not reconcile, so don't update the deployment - // allow the OSD health checker to remove the OSD - logger.Warningf( - "not updating OSD %d on node %q. node no longer exists in the storage spec. "+ - "if the user wishes to remove OSDs from the node, they must do so manually. "+ - "Rook will not remove OSDs from nodes that are removed from the storage spec in order to prevent accidental data loss", - osdID, nodeOrPVCName) - continue - } - - logger.Infof("updating OSD %d on node %q", osdID, nodeOrPVCName) - updatedDep, err = deploymentOnNodeFunc(c.cluster, osdInfo, nodeOrPVCName, c.provisionConfig) - - message := fmt.Sprintf("Processing OSD %d on node %q", osdID, nodeOrPVCName) - updateConditionFunc(c.cluster.context, c.cluster.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message) - } - if err != nil { - errs.addError("%v", errors.Wrapf(err, "failed to update OSD %d", osdID)) - continue - } - - updatedDeployments = append(updatedDeployments, updatedDep) - listIDs = append(listIDs, strconv.Itoa(osdID)) - } - - // when waiting on deployments to be updated, only list OSDs we intend to update specifically by ID - listFunc := c.cluster.getFuncToListDeploymentsWithIDs(listIDs) - - failures := updateMultipleDeploymentsAndWaitFunc(c.cluster.context.Clientset, updatedDeployments, listFunc) - for _, f := range failures { - errs.addError("%v", errors.Wrapf(f.Error, "failed to update OSD deployment %q", f.ResourceName)) - } - - // If there were failures, don't retry them. If it's a transitory k8s/etcd issue, the next - // reconcile should succeed. If it's a different issue, it will always error. - c.queue.Remove(osdIDs) -} - -// getOSDUpdateInfo returns an update queue of OSDs which need updated and an existence list of OSD -// Deployments which already exist. 
-func (c *Cluster) getOSDUpdateInfo(errs *provisionErrors) (*updateQueue, *existenceList, error) { - ctx := context.TODO() - namespace := c.clusterInfo.Namespace - - selector := fmt.Sprintf("%s=%s", k8sutil.AppAttr, AppName) - listOpts := metav1.ListOptions{ - // list only rook-ceph-osd Deployments - LabelSelector: selector, - } - deps, err := c.context.Clientset.AppsV1().Deployments(namespace).List(ctx, listOpts) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to query existing OSD deployments to see if they need updated") - } - - updateQueue := newUpdateQueueWithCapacity(len(deps.Items)) - existenceList := newExistenceListWithCapacity(len(deps.Items)) - - for i := range deps.Items { - id, err := getOSDID(&deps.Items[i]) // avoid implicit memory aliasing by indexing - if err != nil { - // add a question to the user AFTER the error text to help them recover from user error - errs.addError("%v. did a user create their own deployment with label %q?", selector, err) - continue - } - - // all OSD deployments should be marked as existing - existenceList.Add(id) - updateQueue.Push(id) - } - - return updateQueue, existenceList, nil -} - -// An updateQueue keeps track of OSDs which need updated. -type updateQueue struct { - q []int // just a list of OSD IDs -} - -// Create a new updateQueue with capacity reserved. -func newUpdateQueueWithCapacity(cap int) *updateQueue { - return &updateQueue{ - q: make([]int, 0, cap), - } -} - -func newUpdateQueueWithIDs(ids ...int) *updateQueue { - return &updateQueue{ - q: ids, - } -} - -// Len returns the length of the queue. -func (q *updateQueue) Len() int { - return len(q.q) -} - -// Push pushes an item onto the end of the queue. -func (q *updateQueue) Push(osdID int) { - q.q = append(q.q, osdID) -} - -// Pop pops an item off the beginning of the queue. -// Returns -1 and ok=false if the queue is empty. Otherwise, returns an OSD ID and ok=true. -func (q *updateQueue) Pop() (osdID int, ok bool) { - if q.Len() == 0 { - return -1, false - } - - osdID = q.q[0] - q.q = q.q[1:] - return osdID, true -} - -// Exists returns true if the item exists in the queue. -func (q *updateQueue) Exists(osdID int) bool { - for _, id := range q.q { - if id == osdID { - return true - } - } - return false -} - -// Remove removes the items from the queue if they exist. -func (q *updateQueue) Remove(osdIDs []int) { - shouldRemove := func(rid int) bool { - for _, id := range osdIDs { - if id == rid { - return true - } - } - return false - } - - lastIdx := 0 - for idx, osdID := range q.q { - if !shouldRemove(osdID) { - // do removal by shifting slice items that should be kept into the next good position in - // the slice, and then reduce the slice capacity to match the number of kept items - q.q[lastIdx] = q.q[idx] - lastIdx++ - } - } - q.q = q.q[:lastIdx] -} - -// An existenceList keeps track of which OSDs already have Deployments created for them that is -// queryable in O(1) time. -type existenceList struct { - m map[int]bool -} - -// Create a new existenceList with capacity reserved. -func newExistenceListWithCapacity(cap int) *existenceList { - return &existenceList{ - m: make(map[int]bool, cap), - } -} - -func newExistenceListWithIDs(ids ...int) *existenceList { - e := newExistenceListWithCapacity(len(ids)) - for _, id := range ids { - e.Add(id) - } - return e -} - -// Len returns the length of the existence list, the number of existing items. -func (e *existenceList) Len() int { - return len(e.m) -} - -// Add adds an item to the existenceList. 
-func (e *existenceList) Add(osdID int) { - e.m[osdID] = true -} - -// Exists returns true if an item is recorded in the existence list or false if it does not. -func (e *existenceList) Exists(osdID int) bool { - _, ok := e.m[osdID] - return ok -} - -// return a function that will list only OSD deployments with the IDs given -func (c *Cluster) getFuncToListDeploymentsWithIDs(osdIDs []string) func() (*appsv1.DeploymentList, error) { - ctx := context.TODO() - selector := fmt.Sprintf("ceph-osd-id in (%s)", strings.Join(osdIDs, ", ")) - listOpts := metav1.ListOptions{ - LabelSelector: selector, // e.g. 'ceph-osd-id in (1, 3, 5, 7, 9)' - } - return func() (*appsv1.DeploymentList, error) { - return c.context.Clientset.AppsV1().Deployments(c.clusterInfo.Namespace).List(ctx, listOpts) - } -} diff --git a/pkg/operator/ceph/cluster/osd/update_test.go b/pkg/operator/ceph/cluster/osd/update_test.go deleted file mode 100644 index f077e15cf..000000000 --- a/pkg/operator/ceph/cluster/osd/update_test.go +++ /dev/null @@ -1,718 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package osd - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephclientfake "github.com/rook/rook/pkg/daemon/ceph/client/fake" - "github.com/rook/rook/pkg/operator/ceph/controller" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" -) - -func Test_updateExistingOSDs(t *testing.T) { - namespace := "my-namespace" - - logger.SetLevel(capnslog.DEBUG) - - oldUpdateFunc := updateMultipleDeploymentsAndWaitFunc - oldNodeFunc := deploymentOnNodeFunc - oldPVCFunc := deploymentOnPVCFunc - oldConditionFunc := updateConditionFunc - oldShouldCheckFunc := shouldCheckOkToStopFunc - defer func() { - updateMultipleDeploymentsAndWaitFunc = oldUpdateFunc - deploymentOnNodeFunc = oldNodeFunc - deploymentOnPVCFunc = oldPVCFunc - updateConditionFunc = oldConditionFunc - shouldCheckOkToStopFunc = oldShouldCheckFunc - }() - - var executor *exectest.MockExecutor // will be defined later - - // inputs - var ( - updateQueue *updateQueue - existingDeployments *existenceList - clientset *fake.Clientset - ) - - // behavior control - var ( - updateInjectFailures k8sutil.Failures // return failures from mocked updateDeploymentAndWaitFunc - returnOkToStopIDs []int // return these IDs are ok-to-stop (or not ok to stop if empty) - forceUpgradeIfUnhealthy bool - ) - - // intermediates (created from inputs) - 
var ( - ctx *clusterd.Context - c *Cluster - updateConfig *updateConfig - ) - - // outputs - var ( - osdToBeQueried int // this OSD ID should be queried - deploymentsUpdated []string // updateDeploymentAndWaitFunc adds deployments to this list - osdsOnPVCs []int // deploymentOnPVCFunc adds OSD IDs to this list - osdsOnNodes []int // deploymentOnPVCFunc adds OSD IDs to this list - errs *provisionErrors - ) - - doSetup := func() { - // set up intermediates - ctx = &clusterd.Context{ - Clientset: clientset, - Executor: executor, - } - clusterInfo := &cephclient.ClusterInfo{ - Namespace: namespace, - CephVersion: cephver.Pacific, - } - clusterInfo.SetName("mycluster") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - spec := cephv1.ClusterSpec{ - ContinueUpgradeAfterChecksEvenIfNotHealthy: forceUpgradeIfUnhealthy, - } - c = New(ctx, clusterInfo, spec, "rook/rook:master") - config := c.newProvisionConfig() - updateConfig = c.newUpdateConfig(config, updateQueue, existingDeployments) - - // prepare outputs - deploymentsUpdated = []string{} - osdsOnPVCs = []int{} - osdsOnNodes = []int{} - errs = newProvisionErrors() - } - - // stub out the conditionExportFunc to do nothing. we do not have a fake Rook interface that - // allows us to interact with a CephCluster resource like the fake K8s clientset. - updateConditionFunc = func(c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) { - // do nothing - } - shouldCheckOkToStopFunc = func(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo) bool { - // always check. if shouldCheckOkToStop is not implemented correctly, single-node CI tests - // will fail, which is a more thorough test than we could make in unit tests. 
- return true - } - - updateMultipleDeploymentsAndWaitFunc = - func( - clientset kubernetes.Interface, - deployments []*appsv1.Deployment, - listFunc func() (*appsv1.DeploymentList, error), - ) k8sutil.Failures { - for _, d := range deployments { - deploymentsUpdated = append(deploymentsUpdated, d.Name) - } - return updateInjectFailures - } - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - t.Logf("command: %s %v", command, args) - if args[0] == "osd" { - if args[1] == "ok-to-stop" { - queriedID := args[2] - if strconv.Itoa(osdToBeQueried) != queriedID { - err := errors.Errorf("OSD %d should have been queried, but %s was queried instead", osdToBeQueried, queriedID) - t.Error(err) - return "", err - } - if len(returnOkToStopIDs) > 0 { - return cephclientfake.OsdOkToStopOutput(osdToBeQueried, returnOkToStopIDs, true), nil - } - return cephclientfake.OsdOkToStopOutput(osdToBeQueried, []int{}, true), errors.Errorf("induced error") - } - if args[1] == "crush" && args[2] == "get-device-class" { - return cephclientfake.OSDDeviceClassOutput(args[3]), nil - } - } - panic(fmt.Sprintf("unexpected command %q with args %v", command, args)) - }, - } - - // simple wrappers to allow us to count how many OSDs on nodes/PVCs are identified - deploymentOnNodeFunc = func(c *Cluster, osd OSDInfo, nodeName string, config *provisionConfig) (*appsv1.Deployment, error) { - osdsOnNodes = append(osdsOnNodes, osd.ID) - return deploymentOnNode(c, osd, nodeName, config) - } - deploymentOnPVCFunc = func(c *Cluster, osd OSDInfo, pvcName string, config *provisionConfig) (*appsv1.Deployment, error) { - osdsOnPVCs = append(osdsOnPVCs, osd.ID) - return deploymentOnPVC(c, osd, pvcName, config) - } - - addDeploymentOnNode := func(nodeName string, osdID int) { - d := getDummyDeploymentOnNode(clientset, c, nodeName, osdID) - _, err := clientset.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) - if err != nil { - panic(err) - } - } - - addDeploymentOnPVC := func(pvcName string, osdID int) { - d := getDummyDeploymentOnPVC(clientset, c, pvcName, osdID) - _, err := clientset.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) - if err != nil { - panic(err) - } - } - - t.Run("no items in the update queue should be a noop", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - updateQueue = newUpdateQueueWithIDs() - existingDeployments = newExistenceListWithIDs(0, 2, 4, 6) - forceUpgradeIfUnhealthy = false - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - - osdToBeQueried = -1 // this will make any OSD query fail - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{}) - }) - - t.Run("ok to stop one OSD at a time", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - // reminder that updateQueue pops things off the queue from the front, so the leftmost item - // will be the one queried - updateQueue = newUpdateQueueWithIDs(0, 2, 4, 6) - existingDeployments = newExistenceListWithIDs(0, 2, 4, 6) - forceUpgradeIfUnhealthy = false - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - - for _, i := range []int{0, 2, 4, 6} { - osdToBeQueried = i 
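// each iteration reports only OSD i as ok-to-stop, so exactly one deployment
// (deploymentName(i)) is expected to be updated per call to updateExistingOSDs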
- returnOkToStopIDs = []int{i} - deploymentsUpdated = []string{} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{deploymentName(i)}) - } - assert.ElementsMatch(t, osdsOnNodes, []int{0, 4}) - assert.ElementsMatch(t, osdsOnPVCs, []int{2, 6}) - - assert.Equal(t, 0, updateQueue.Len()) // should be done with updates - }) - - t.Run("ok to stop 3 OSDs at a time", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - updateQueue = newUpdateQueueWithIDs(0, 2, 4, 6) - existingDeployments = newExistenceListWithIDs(0, 2, 4, 6) - forceUpgradeIfUnhealthy = false - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - - osdToBeQueried = 0 - returnOkToStopIDs = []int{0, 4, 6} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, - []string{deploymentName(0), deploymentName(4), deploymentName(6)}) - - // should NOT be done with updates - // this also tests that updateQueue.Len() directly affects doneUpdating() - assert.Equal(t, 1, updateQueue.Len()) - assert.False(t, updateConfig.doneUpdating()) - - deploymentsUpdated = []string{} - osdToBeQueried = 2 - returnOkToStopIDs = []int{2} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, - []string{deploymentName(2)}) - - // should be done with updates - // this also tests that updateQueue.Len() directly affects doneUpdating() - assert.Equal(t, 0, updateQueue.Len()) - assert.True(t, updateConfig.doneUpdating()) - }) - - t.Run("ok to stop more OSDs than are in the update queue", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - updateQueue = newUpdateQueueWithIDs(2, 0) - existingDeployments = newExistenceListWithIDs(6, 4, 2, 0) - forceUpgradeIfUnhealthy = false - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - - osdToBeQueried = 2 - returnOkToStopIDs = []int{2, 4, 6} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{deploymentName(2)}) - - deploymentsUpdated = []string{} - osdToBeQueried = 0 - returnOkToStopIDs = []int{0, 6} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{deploymentName(0)}) - - assert.Equal(t, 0, updateQueue.Len()) // should be done with updates - }) - - t.Run("ok to stop OSDS not in existence list (newly-created OSDs)", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - updateQueue = newUpdateQueueWithIDs(2, 0) - existingDeployments = newExistenceListWithIDs(2, 0) - forceUpgradeIfUnhealthy = false - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - - osdToBeQueried = 2 - returnOkToStopIDs = []int{2, 4, 6} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{deploymentName(2)}) - - deploymentsUpdated = []string{} - osdToBeQueried = 0 - returnOkToStopIDs = []int{0, 6} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, 
[]string{deploymentName(0)}) - - assert.Equal(t, 0, updateQueue.Len()) // should be done with updates - }) - - t.Run("not ok to stop OSD", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - updateQueue = newUpdateQueueWithIDs(2) - existingDeployments = newExistenceListWithIDs(0, 2, 4, 6) - forceUpgradeIfUnhealthy = false - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - - osdToBeQueried = 2 - returnOkToStopIDs = []int{} // not ok to stop - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{}) - assert.Equal(t, 1, updateQueue.Len()) // the OSD should have been requeued - - osdToBeQueried = 2 - returnOkToStopIDs = []int{2} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{deploymentName(2)}) - assert.Equal(t, 0, updateQueue.Len()) // the OSD should now have been removed from the queue - }) - - t.Run("continueUpgradesAfterChecksEvenIfUnhealthy = true", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - updateQueue = newUpdateQueueWithIDs(2) - existingDeployments = newExistenceListWithIDs(0, 2, 4, 6) - forceUpgradeIfUnhealthy = true // FORCE UPDATES - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - - osdToBeQueried = 2 - returnOkToStopIDs = []int{} // NOT ok-to-stop - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{deploymentName(2)}) - - assert.Equal(t, 0, updateQueue.Len()) // should be done with updates - }) - - t.Run("failures updating deployments", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - updateQueue = newUpdateQueueWithIDs(0, 2, 4, 6) - existingDeployments = newExistenceListWithIDs(0, 2, 4, 6) - forceUpgradeIfUnhealthy = false - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - - osdToBeQueried = 0 - returnOkToStopIDs = []int{0, 6} - updateInjectFailures = k8sutil.Failures{ - {ResourceName: deploymentName(6), Error: errors.Errorf("induced failure updating OSD 6")}, - } - updateConfig.updateExistingOSDs(errs) - assert.Equal(t, 1, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, - []string{deploymentName(0), deploymentName(6)}) - - deploymentsUpdated = []string{} - osdToBeQueried = 2 - returnOkToStopIDs = []int{2, 4} - updateInjectFailures = k8sutil.Failures{ - {ResourceName: deploymentName(2), Error: errors.Errorf("induced failure updating OSD 2")}, - {ResourceName: deploymentName(4), Error: errors.Errorf("induced failure waiting for OSD 4")}, - } - updateConfig.updateExistingOSDs(errs) - assert.Equal(t, 3, errs.len()) // errors should be appended to the same provisionErrors struct - assert.ElementsMatch(t, deploymentsUpdated, - []string{deploymentName(2), deploymentName(4)}) - - assert.Zero(t, updateQueue.Len()) // errors should not be requeued - }) - - t.Run("failure due to OSD deployment with bad info", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - updateQueue = newUpdateQueueWithIDs(0, 6) - existingDeployments = newExistenceListWithIDs(0, 2, 4, 6) - forceUpgradeIfUnhealthy = false - 
updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnPVC("pvc2", 2) - addDeploymentOnNode("node1", 4) - addDeploymentOnPVC("pvc6", 6) - // give OSD 6 bad info by removing env vars from primary container - deploymentClient := clientset.AppsV1().Deployments(namespace) - d, err := deploymentClient.Get(context.TODO(), deploymentName(6), metav1.GetOptions{}) - if err != nil { - panic(err) - } - d.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{} - _, err = deploymentClient.Update(context.TODO(), d, metav1.UpdateOptions{}) - if err != nil { - panic(err) - } - deploymentsUpdated = []string{} - - osdToBeQueried = 0 - returnOkToStopIDs = []int{0, 6} - updateConfig.updateExistingOSDs(errs) - assert.Equal(t, 1, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, - []string{deploymentName(0)}) - - assert.Zero(t, updateQueue.Len()) // errors should not be requeued - }) - - t.Run("do not update OSDs on nodes removed from the storage spec", func(t *testing.T) { - clientset = fake.NewSimpleClientset() - // reminder that updateQueue pops things off the queue from the front, so the leftmost item - // will be the one queried - updateQueue = newUpdateQueueWithIDs(0, 4) - existingDeployments = newExistenceListWithIDs(0, 4) - forceUpgradeIfUnhealthy = false - updateInjectFailures = k8sutil.Failures{} - doSetup() - addDeploymentOnNode("node0", 0) - addDeploymentOnNode("node1", 4) - - // Remove "node0" from valid storage (user no longer wants it) - assert.Equal(t, "node0", c.ValidStorage.Nodes[0].Name) - c.ValidStorage.Nodes = c.ValidStorage.Nodes[1:] - t.Logf("valid storage nodes: %+v", c.ValidStorage.Nodes) - - osdToBeQueried = 0 - returnOkToStopIDs = []int{0, 4} - updateConfig.updateExistingOSDs(errs) - assert.Zero(t, errs.len()) - assert.ElementsMatch(t, deploymentsUpdated, []string{deploymentName(4)}) - - assert.ElementsMatch(t, osdsOnNodes, []int{4}) - assert.ElementsMatch(t, osdsOnPVCs, []int{}) - - assert.Equal(t, 0, updateQueue.Len()) // should be done with updates - }) -} - -func Test_getOSDUpdateInfo(t *testing.T) { - namespace := "rook-ceph" - cephImage := "quay.io/ceph/ceph:v15" - - // NOTE: all tests share the same clientset - clientset := fake.NewSimpleClientset() - ctx := &clusterd.Context{ - Clientset: clientset, - } - clusterInfo := &cephclient.ClusterInfo{ - Namespace: namespace, - CephVersion: cephver.Nautilus, - } - clusterInfo.SetName("mycluster") - clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - spec := cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{Image: cephImage}, - } - c := New(ctx, clusterInfo, spec, "rook/rook:master") - - var errs *provisionErrors - var d *appsv1.Deployment - - t.Run("cluster with no existing deployments", func(t *testing.T) { - errs = newProvisionErrors() - updateQueue, existenceList, err := c.getOSDUpdateInfo(errs) - assert.NoError(t, err) - assert.Zero(t, errs.len()) - assert.Zero(t, updateQueue.Len()) - assert.Zero(t, existenceList.Len()) - }) - - t.Run("cluster in namespace with existing deployments, but none are OSDs", func(t *testing.T) { - // random deployment in this namespace - addTestDeployment(clientset, "non-rook-deployment", namespace, map[string]string{}) - - // mon.a in this namespace - l := controller.CephDaemonAppLabels("rook-ceph-mon", namespace, "mon", "a", true) - addTestDeployment(clientset, "rook-ceph-mon-a", namespace, l) - - // osd.1 and 3 in another namespace (another Rook cluster) - clusterInfo2 := &cephclient.ClusterInfo{ - Namespace: 
"other-namespace", - CephVersion: cephver.Nautilus, - } - clusterInfo2.SetName("other-cluster") - clusterInfo2.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) - c2 := New(ctx, clusterInfo2, spec, "rook/rook:master") - - // osd.1 on PVC in "other-namespace" - d = getDummyDeploymentOnPVC(clientset, c2, "pvc1", 1) - createDeploymentOrPanic(clientset, d) - - // osd.3 on Node in "other-namespace" - d = getDummyDeploymentOnNode(clientset, c2, "node3", 3) - createDeploymentOrPanic(clientset, d) - - errs = newProvisionErrors() - updateQueue, existenceList, err := c.getOSDUpdateInfo(errs) - assert.NoError(t, err) - assert.Zero(t, errs.len()) - assert.Zero(t, updateQueue.Len()) - assert.Zero(t, existenceList.Len()) - }) - - t.Run("cluster in namespace with existing OSD deployments", func(t *testing.T) { - // osd.0 on PVC in this namespace - d = getDummyDeploymentOnPVC(clientset, c, "pvc0", 0) - createDeploymentOrPanic(clientset, d) - - // osd.2 on Node in this namespace - d = getDummyDeploymentOnNode(clientset, c, "node2", 2) - createDeploymentOrPanic(clientset, d) - - errs = newProvisionErrors() - updateQueue, existenceList, err := c.getOSDUpdateInfo(errs) - assert.NoError(t, err) - assert.Zero(t, errs.len()) - assert.Equal(t, 2, updateQueue.Len()) - assert.True(t, updateQueue.Exists(0)) - assert.True(t, updateQueue.Exists(2)) - assert.Equal(t, 2, existenceList.Len()) - assert.True(t, existenceList.Exists(0)) - assert.True(t, existenceList.Exists(2)) - }) - - t.Run("existing OSD deployment with no OSD ID", func(t *testing.T) { - l := map[string]string{k8sutil.AppAttr: AppName} - addTestDeployment(clientset, "rook-ceph-osd-NOID", namespace, l) - - errs = newProvisionErrors() - updateQueue, existenceList, err := c.getOSDUpdateInfo(errs) - assert.NoError(t, err) - assert.Equal(t, 1, errs.len()) - // should have same update queue and existence list as last test - assert.Equal(t, 2, updateQueue.Len()) - assert.Equal(t, 2, existenceList.Len()) - }) - - t.Run("failure to list OSD deployments", func(t *testing.T) { - // reset the test to check that an error is reported if listing OSD deployments fails - test.PrependFailReactor(t, clientset, "list", "deployments") - ctx = &clusterd.Context{ - Clientset: clientset, - } - c = New(ctx, clusterInfo, spec, "rook/rook:master") - - errs = newProvisionErrors() - _, _, err := c.getOSDUpdateInfo(errs) - fmt.Println(err) - assert.Error(t, err) - }) -} - -func addTestDeployment(clientset *fake.Clientset, name, namespace string, labels map[string]string) { - d := &appsv1.Deployment{} - d.SetName(name) - d.SetNamespace(namespace) - d.SetLabels(labels) - createDeploymentOrPanic(clientset, d) -} - -func createDeploymentOrPanic(clientset *fake.Clientset, d *appsv1.Deployment) { - _, err := clientset.AppsV1().Deployments(d.Namespace).Create(context.TODO(), d, metav1.CreateOptions{}) - if err != nil { - panic(err) - } -} - -func Test_updateQueue(t *testing.T) { - q := newUpdateQueueWithCapacity(2) - assert.Equal(t, 2, cap(q.q)) - assert.Zero(t, q.Len()) - - testPop := func(osdID int) { - t.Helper() - id, ok := q.Pop() - assert.Equal(t, osdID, id) - assert.True(t, ok) - } - - assertEmpty := func() { - t.Helper() - id, ok := q.Pop() - assert.Equal(t, -1, id) - assert.False(t, ok) - } - - // assert empty behavior initially - assertEmpty() - - // test basic functionality - q.Push(0) - assert.Equal(t, 1, q.Len()) - testPop(0) - assertEmpty() - - // test that queue can hold more items than initial capacity - // and that items are Pushed/Popped in FIFO order - q.Push(1) - q.Push(2) - 
q.Push(3) - assert.Equal(t, 3, q.Len()) - assert.True(t, q.Exists(1)) - assert.True(t, q.Exists(2)) - assert.True(t, q.Exists(3)) - testPop(1) - testPop(2) - testPop(3) - assertEmpty() - - // Test removing queue items via q.Remove - for _, i := range []int{4, 5, 6, 7, 8} { - q.Push(i) - assert.True(t, q.Exists(i)) - } - assert.Equal(t, 5, q.Len()) - q.Remove([]int{ - 1, 2, 3, // non-existent items shouldn't affect queue - 4, // remove first item - 6, // remove a middle item - 8, // remove last item - }) - assert.Equal(t, 2, q.Len()) - assert.False(t, q.Exists(1)) - assert.False(t, q.Exists(2)) - assert.False(t, q.Exists(3)) - assert.True(t, q.Exists(5)) - assert.False(t, q.Exists(6)) - assert.True(t, q.Exists(7)) - assert.False(t, q.Exists(8)) - testPop(5) - // items pushed back onto the queue after removal should not get old values - q.Push(9) - q.Push(10) - assert.Equal(t, 3, q.Len()) - assert.False(t, q.Exists(5)) - assert.True(t, q.Exists(7)) - assert.True(t, q.Exists(9)) - assert.True(t, q.Exists(10)) - testPop(7) - testPop(9) - testPop(10) - assertEmpty() -} - -func Test_existenceList(t *testing.T) { - l := newExistenceListWithCapacity(2) - - // Assert zero item does not exist initially - assert.False(t, l.Exists(0)) - assert.Zero(t, l.Len()) - - // Assert basic functionality - l.Add(1) - assert.True(t, l.Exists(1)) - assert.False(t, l.Exists(0)) - assert.False(t, l.Exists(2)) - assert.Equal(t, 1, l.Len()) - - // assert that more items can be added than initial capacity - l.Add(0) - l.Add(2) - l.Add(3) - assert.True(t, l.Exists(0)) - assert.True(t, l.Exists(1)) // 1 should still exist from before - assert.True(t, l.Exists(2)) - assert.True(t, l.Exists(3)) - assert.False(t, l.Exists(4)) - assert.Equal(t, 4, l.Len()) - - // assert that the same item can be added twice (though this should never happen for OSDs IRL) - l.Add(1) - assert.True(t, l.Exists(1)) - assert.Equal(t, 4, l.Len()) -} diff --git a/pkg/operator/ceph/cluster/osd/volumes.go b/pkg/operator/ceph/cluster/osd/volumes.go deleted file mode 100644 index c38a462cb..000000000 --- a/pkg/operator/ceph/cluster/osd/volumes.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "fmt" - "path" - "path/filepath" - - "github.com/libopenstorage/secrets" - kms "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - "github.com/rook/rook/pkg/operator/ceph/config" - v1 "k8s.io/api/core/v1" -) - -const ( - udevPath = "/run/udev" - udevVolName = "run-udev" - osdEncryptionVolName = "osd-encryption-key" - dmPath = "/dev/mapper" - dmVolName = "dev-mapper" -) - -func getPvcOSDBridgeMount(claimName string) v1.VolumeMount { - return v1.VolumeMount{ - Name: fmt.Sprintf("%s-bridge", claimName), - MountPath: "/mnt", - } -} - -func getPvcOSDBridgeMountActivate(mountPath, claimName string) v1.VolumeMount { - return v1.VolumeMount{ - Name: fmt.Sprintf("%s-bridge", claimName), - MountPath: mountPath, - SubPath: path.Base(mountPath), - } -} - -func getPvcMetadataOSDBridgeMount(claimName string) v1.VolumeMount { - return v1.VolumeMount{ - Name: fmt.Sprintf("%s-bridge", claimName), - MountPath: "/srv", - } -} - -func getPvcWalOSDBridgeMount(claimName string) v1.VolumeMount { - return v1.VolumeMount{ - Name: fmt.Sprintf("%s-bridge", claimName), - MountPath: "/wal", - } -} - -func getDeviceMapperMount() v1.VolumeMount { - return v1.VolumeMount{ - MountPath: dmPath, - Name: dmVolName, - } -} - -func getDeviceMapperVolume() (v1.Volume, v1.VolumeMount) { - volume := v1.Volume{ - Name: dmVolName, - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{Path: dmPath}, - }, - } - - volumeMounts := v1.VolumeMount{ - Name: dmVolName, - MountPath: dmPath, - } - - return volume, volumeMounts -} - -func getDataBridgeVolumeSource(claimName, configDir, namespace string, inProvisioning bool) v1.VolumeSource { - var source v1.VolumeSource - if inProvisioning { - source.EmptyDir = &v1.EmptyDirVolumeSource{ - Medium: "Memory", - } - } else { - // We need to use hostPath to prevent multiple OSD pods from launching the same OSD and causing corruption. - // Ceph avoids this problem by locking fsid file and block device file under the data bridge volume directory. - // These locks are released by kernel once the process is gone, so until the ceph-osd daemon alives, the other - // pods (same OSD) will not be able to acquire them and will continue to be restarted. - // If we use emptyDir, this exclusive control doesn't work because the lock files aren't shared between OSD pods. 
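// The exclusion described in the comment above can be reproduced with a plain
// flock(2) call. This is only a rough, standalone sketch (Linux-only, hypothetical
// path — the real locks are taken by ceph-osd itself on the fsid and block files
// under the data bridge directory), not code from the deleted file.
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// With a hostPath-backed bridge volume, every OSD pod for the same claim on a
	// node opens the same inode, so the kernel-held advisory lock is truly shared.
	// With emptyDir each pod would see its own copy and the lock could never conflict.
	f, err := os.OpenFile("/var/lib/rook/rook-ceph/claim-name/fsid", os.O_RDWR|os.O_CREATE, 0o600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// LOCK_NB makes a second claimant fail immediately instead of blocking, which is
	// what keeps a duplicate OSD pod restarting until the first process exits and the
	// kernel releases the lock.
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		fmt.Println("another process holds the OSD lock:", err)
		return
	}
	fmt.Println("lock acquired; safe to start this OSD")
}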
- hostPathType := v1.HostPathDirectoryOrCreate - source.HostPath = &v1.HostPathVolumeSource{ - Path: filepath.Join( - configDir, - namespace, - claimName), - Type: &hostPathType, - } - } - return source -} - -func getPVCOSDVolumes(osdProps *osdProperties, configDir string, namespace string, prepare bool) []v1.Volume { - volumes := []v1.Volume{ - { - Name: osdProps.pvc.ClaimName, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &osdProps.pvc, - }, - }, - { - // We need a bridge mount which is basically a common volume mount between the non privileged init container - // and the privileged provision container or osd daemon container - // The reason for this is mentioned in the comment for getPVCInitContainer() method - Name: fmt.Sprintf("%s-bridge", osdProps.pvc.ClaimName), - VolumeSource: getDataBridgeVolumeSource(osdProps.pvc.ClaimName, configDir, namespace, prepare), - }, - } - - // If we have a metadata PVC let's add it - if osdProps.onPVCWithMetadata() { - metadataPVCVolume := []v1.Volume{ - { - Name: osdProps.metadataPVC.ClaimName, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &osdProps.metadataPVC, - }, - }, - { - // We need a bridge mount which is basically a common volume mount between the non privileged init container - // and the privileged provision container or osd daemon container - // The reason for this is mentioned in the comment for getPVCInitContainer() method - Name: fmt.Sprintf("%s-bridge", osdProps.metadataPVC.ClaimName), - VolumeSource: getDataBridgeVolumeSource(osdProps.metadataPVC.ClaimName, configDir, namespace, prepare), - }, - } - - volumes = append(volumes, metadataPVCVolume...) - } - - // If we have a wal PVC let's add it - if osdProps.onPVCWithWal() { - walPVCVolume := []v1.Volume{ - { - Name: osdProps.walPVC.ClaimName, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &osdProps.walPVC, - }, - }, - { - // We need a bridge mount which is basically a common volume mount between the non privileged init container - // and the privileged provision container or osd daemon container - // The reason for this is mentioned in the comment for getPVCInitContainer() method - Name: fmt.Sprintf("%s-bridge", osdProps.walPVC.ClaimName), - VolumeSource: getDataBridgeVolumeSource(osdProps.walPVC.ClaimName, configDir, namespace, prepare), - }, - } - - volumes = append(volumes, walPVCVolume...) 
- } - - logger.Debugf("volumes are %+v", volumes) - - return volumes -} - -func getUdevVolume() (v1.Volume, v1.VolumeMount) { - volume := v1.Volume{ - Name: udevVolName, - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{Path: udevPath}, - }, - } - - volumeMounts := v1.VolumeMount{ - Name: udevVolName, - MountPath: udevPath, - } - - return volume, volumeMounts -} - -func (c *Cluster) getEncryptionVolume(osdProps osdProperties) (v1.Volume, v1.VolumeMount) { - // Determine whether we have a KMS configuration - var isKMS bool - if len(c.spec.Security.KeyManagementService.ConnectionDetails) != 0 { - provider := kms.GetParam(c.spec.Security.KeyManagementService.ConnectionDetails, kms.Provider) - if provider == secrets.TypeVault { - isKMS = true - } - } - - // Generate volume - var m int32 = 0400 - volume := v1.Volume{ - Name: osdEncryptionVolName, - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: kms.GenerateOSDEncryptionSecretName(osdProps.pvc.ClaimName), - Items: []v1.KeyToPath{ - { - Key: kms.OsdEncryptionSecretNameKeyName, - Path: encryptionKeyFileName, - }, - }, - DefaultMode: &m, - }, - }, - } - - // On the KMS use case, we want the volume mount to be in memory since we pass write the KEK - if isKMS { - volume.VolumeSource.Secret = nil - volume.VolumeSource = v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{ - Medium: v1.StorageMediumMemory, - }, - } - } - - // Mounts /etc/ceph/luks_key - volumeMounts := v1.VolumeMount{ - Name: osdEncryptionVolName, - ReadOnly: true, - MountPath: config.EtcCephDir, - } - - // With KMS we must be able to write inside the directory to write the KEK - if isKMS { - volumeMounts.ReadOnly = false - } - - return volume, volumeMounts -} diff --git a/pkg/operator/ceph/cluster/osd/volumes_test.go b/pkg/operator/ceph/cluster/osd/volumes_test.go deleted file mode 100644 index 5f794bccd..000000000 --- a/pkg/operator/ceph/cluster/osd/volumes_test.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package osd - -import ( - "path/filepath" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" -) - -func TestGetEncryptionVolume(t *testing.T) { - var m int32 = 0400 - c := &Cluster{} - - // No KMS - osdProps := osdProperties{pvc: v1.PersistentVolumeClaimVolumeSource{ClaimName: "set1-data-1-bbgcw"}} - v, vM := c.getEncryptionVolume(osdProps) - assert.Equal(t, v1.Volume{Name: "osd-encryption-key", VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "rook-ceph-osd-encryption-key-set1-data-1-bbgcw", Items: []v1.KeyToPath{{Key: "dmcrypt-key", Path: "luks_key"}}, DefaultMode: &m}}}, v) - assert.Equal(t, v1.VolumeMount{Name: "osd-encryption-key", ReadOnly: true, MountPath: "/etc/ceph"}, vM) - - // With KMS - c.spec.Security = cephv1.SecuritySpec{ - KeyManagementService: cephv1.KeyManagementServiceSpec{ - ConnectionDetails: map[string]string{"KMS_PROVIDER": "vault"}, - }, - } - v, vM = c.getEncryptionVolume(osdProps) - assert.Equal(t, v1.Volume{Name: "osd-encryption-key", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: "Memory"}}}, v) - assert.Equal(t, v1.VolumeMount{Name: "osd-encryption-key", ReadOnly: false, MountPath: "/etc/ceph"}, vM) -} - -func TestGetDataBridgeVolumeSource(t *testing.T) { - claimName := "test-claim" - configDir := "/var/lib/rook" - namespace := "rook-ceph" - - source := getDataBridgeVolumeSource(claimName, configDir, namespace, true) - assert.Equal(t, v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: "Memory"}}, source) - hostPathType := v1.HostPathDirectoryOrCreate - source = getDataBridgeVolumeSource(claimName, configDir, namespace, false) - assert.Equal(t, v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: filepath.Join(configDir, namespace, claimName), Type: &hostPathType}}, source) -} diff --git a/pkg/operator/ceph/cluster/predicate.go b/pkg/operator/ceph/cluster/predicate.go deleted file mode 100644 index 5a764135b..000000000 --- a/pkg/operator/ceph/cluster/predicate.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. 
-package cluster - -import ( - "github.com/google/go-cmp/cmp" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - discoverDaemon "github.com/rook/rook/pkg/daemon/discover" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" -) - -// predicateForNodeWatcher is the predicate function to trigger reconcile on Node events -func predicateForNodeWatcher(client client.Client, context *clusterd.Context) predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - clientCluster := newClientCluster(client, e.Object.GetNamespace(), context) - return clientCluster.onK8sNode(e.Object) - }, - - UpdateFunc: func(e event.UpdateEvent) bool { - clientCluster := newClientCluster(client, e.ObjectNew.GetNamespace(), context) - return clientCluster.onK8sNode(e.ObjectNew) - }, - - DeleteFunc: func(e event.DeleteEvent) bool { - return false - }, - - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} - -// predicateForHotPlugCMWatcher is the predicate function to trigger reconcile on ConfigMap events (hot-plug) -func predicateForHotPlugCMWatcher(client client.Client) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - isHotPlugCM := isHotPlugCM(e.ObjectNew) - if !isHotPlugCM { - logger.Debugf("hot-plug cm watcher: only reconcile on hot plug cm changes, this %q cm is handled by another watcher", e.ObjectNew.GetName()) - return false - } - - clientCluster := newClientCluster(client, e.ObjectNew.GetNamespace(), &clusterd.Context{}) - return clientCluster.onDeviceCMUpdate(e.ObjectOld, e.ObjectNew) - }, - - DeleteFunc: func(e event.DeleteEvent) bool { - // TODO: if the configmap goes away we could retrigger rook-discover DS - // However at this point the returned bool can only trigger a reconcile of the CephCluster object - // Definitely non-trivial but nice to have in the future - return false - }, - - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} - -// isHotPlugCM informs whether the object is the cm for hot-plug disk -func isHotPlugCM(obj runtime.Object) bool { - // If not a ConfigMap, let's not reconcile - cm, ok := obj.(*corev1.ConfigMap) - if !ok { - return false - } - - // Get the labels - labels := cm.GetLabels() - - labelVal, labelKeyExist := labels[k8sutil.AppAttr] - if labelKeyExist && labelVal == discoverDaemon.AppName { - return true - } - - return false -} - -func watchControllerPredicate(rookContext *clusterd.Context) predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - logger.Debug("create event from a CR") - return true - }, - DeleteFunc: func(e event.DeleteEvent) bool { - logger.Debug("delete event from a CR") - return true - }, - UpdateFunc: func(e event.UpdateEvent) bool { - // resource.Quantity has non-exportable fields, so we use its comparator method - resourceQtyComparer := cmp.Comparer(func(x, y resource.Quantity) bool { return x.Cmp(y) == 0 }) - - switch objOld := e.ObjectOld.(type) { - case *cephv1.CephCluster: - objNew := e.ObjectNew.(*cephv1.CephCluster) - logger.Debug("update event on CephCluster CR") - // If the labels 
"do_not_reconcile" is set on the object, let's not reconcile that request - isDoNotReconcile := controller.IsDoNotReconcile(objNew.GetLabels()) - if isDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", controller.DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - // Set the cancellation flag to stop any ongoing orchestration - rookContext.RequestCancelOrchestration.Set() - - logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff) - return true - - } else if !objOld.GetDeletionTimestamp().Equal(objNew.GetDeletionTimestamp()) { - // Set the cancellation flag to stop any ongoing orchestration - rookContext.RequestCancelOrchestration.Set() - - logger.Infof("CR %q is going be deleted, cancelling any ongoing orchestration", objNew.Name) - return true - - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - } - - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} diff --git a/pkg/operator/ceph/cluster/predicate_test.go b/pkg/operator/ceph/cluster/predicate_test.go deleted file mode 100644 index aaec26afa..000000000 --- a/pkg/operator/ceph/cluster/predicate_test.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" -) - -func TestIsHotPlugCM(t *testing.T) { - blockPool := &cephv1.CephBlockPool{} - - assert.False(t, isHotPlugCM(blockPool)) - - cm := &corev1.ConfigMap{} - assert.False(t, isHotPlugCM(cm)) - - cm.Labels = map[string]string{ - "foo": "bar", - } - assert.False(t, isHotPlugCM(cm)) - - cm.Labels["app"] = "rook-discover" - assert.True(t, isHotPlugCM(cm)) -} diff --git a/pkg/operator/ceph/cluster/rbd/config.go b/pkg/operator/ceph/cluster/rbd/config.go deleted file mode 100644 index 03a8296ae..000000000 --- a/pkg/operator/ceph/cluster/rbd/config.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package rbd - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - keyringTemplate = ` -[client.rbd-mirror.%s] - key = %s - caps mon = "profile rbd-mirror" - caps osd = "profile rbd" -` -) - -// daemonConfig for a single rbd-mirror -type daemonConfig struct { - ResourceName string // the name rook gives to mirror resources in k8s metadata - DaemonID string // the ID of the Ceph daemon ("a", "b", ...) - DataPathMap *config.DataPathMap // location to store data in container - ownerInfo *k8sutil.OwnerInfo -} - -func (r *ReconcileCephRBDMirror) generateKeyring(clusterInfo *client.ClusterInfo, daemonConfig *daemonConfig) (string, error) { - ctx := context.TODO() - user := fullDaemonName(daemonConfig.DaemonID) - access := []string{"mon", "profile rbd-mirror", "osd", "profile rbd"} - s := keyring.GetSecretStore(r.context, clusterInfo, daemonConfig.ownerInfo) - - key, err := s.GenerateKey(user, access) - if err != nil { - return "", err - } - - // Delete legacy key store for upgrade from Rook v0.9.x to v1.0.x - err = r.context.Clientset.CoreV1().Secrets(clusterInfo.Namespace).Delete(ctx, daemonConfig.ResourceName, metav1.DeleteOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debugf("legacy rbd-mirror key %q is already removed", daemonConfig.ResourceName) - } else { - logger.Warningf("legacy rbd-mirror key %q could not be removed. %v", daemonConfig.ResourceName, err) - } - } - - keyring := fmt.Sprintf(keyringTemplate, daemonConfig.DaemonID, key) - return keyring, s.CreateOrUpdate(daemonConfig.ResourceName, keyring) -} - -func fullDaemonName(daemonID string) string { - return fmt.Sprintf("client.rbd-mirror.%s", daemonID) -} - -func (r *ReconcileCephRBDMirror) reconcileAddBoostrapPeer(cephRBDMirror *cephv1.CephRBDMirror, namespacedName types.NamespacedName) (reconcile.Result, error) { - ctx := context.TODO() - // List all the peers secret, we can have more than one peer we might want to configure - // For each, get the Kubernetes Secret and import the "peer token" so that we can configure the mirroring - - logger.Warning("(DEPRECATED) use of peer secret names in CephRBDMirror is deprecated. 
Please use CephBlockPool CR to configure peer secret names and import peers.") - for _, peerSecret := range cephRBDMirror.Spec.Peers.SecretNames { - logger.Debugf("fetching bootstrap peer kubernetes secret %q", peerSecret) - s, err := r.context.Clientset.CoreV1().Secrets(r.clusterInfo.Namespace).Get(ctx, peerSecret, metav1.GetOptions{}) - // We don't care about IsNotFound here, we still need to fail - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to fetch kubernetes secret %q bootstrap peer", peerSecret) - } - - // Validate peer secret content - err = opcontroller.ValidatePeerToken(cephRBDMirror, s.Data) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to validate rbd-mirror bootstrap peer secret %q data", peerSecret) - } - - // Add Peer detail to the Struct - r.peers[peerSecret] = &peerSpec{poolName: string(s.Data["pool"]), direction: string(s.Data["direction"])} - - // Add rbd-mirror peer - err = r.addPeer(peerSecret, s.Data) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to add rbd-mirror bootstrap peer") - } - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileCephRBDMirror) addPeer(peerSecret string, data map[string][]byte) error { - // Import bootstrap peer - err := client.ImportRBDMirrorBootstrapPeer(r.context, r.clusterInfo, r.peers[peerSecret].poolName, r.peers[peerSecret].direction, data["token"]) - if err != nil { - return errors.Wrap(err, "failed to import bootstrap peer token") - } - - // Now the bootstrap peer has been added so we can hydrate the pool mirror info - poolMirrorInfo, err := client.GetPoolMirroringInfo(r.context, r.clusterInfo, r.peers[peerSecret].poolName) - if err != nil { - return errors.Wrap(err, "failed to get pool mirror information") - } - r.peers[peerSecret].info = poolMirrorInfo - - return nil -} - -func validateSpec(r *cephv1.RBDMirroringSpec) error { - if r.Count == 0 { - return errors.New("rbd-mirror count must be at least one") - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/rbd/config_test.go b/pkg/operator/ceph/cluster/rbd/config_test.go deleted file mode 100644 index bb6097253..000000000 --- a/pkg/operator/ceph/cluster/rbd/config_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package rbd - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/stretchr/testify/assert" -) - -func TestValidateSpec(t *testing.T) { - // Invalid count - r := &cephv1.RBDMirroringSpec{Count: 0} - err := validateSpec(r) - assert.Error(t, err) - - // Correct count - r.Count = 1 - err = validateSpec(r) - assert.NoError(t, err) - - // Valid only a single peer - r.Peers.SecretNames = append(r.Peers.SecretNames, "foo") - err = validateSpec(r) - assert.NoError(t, err) - - // Multiple pools mirroring are supported with the same peer is supported - r.Peers.SecretNames = append(r.Peers.SecretNames, "bar") - err = validateSpec(r) - assert.NoError(t, err) -} diff --git a/pkg/operator/ceph/cluster/rbd/controller.go b/pkg/operator/ceph/cluster/rbd/controller.go deleted file mode 100644 index a2ef91d51..000000000 --- a/pkg/operator/ceph/cluster/rbd/controller.go +++ /dev/null @@ -1,284 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rbd - -import ( - "context" - "fmt" - "reflect" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - controllerName = "ceph-rbd-mirror-controller" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -// List of object resources to watch by the controller -var objectsToWatch = []client.Object{ - &v1.Service{TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: v1.SchemeGroupVersion.String()}}, - &v1.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: v1.SchemeGroupVersion.String()}}, - &appsv1.Deployment{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.String()}}, -} - -var cephRBDMirrorKind = reflect.TypeOf(cephv1.CephRBDMirror{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephRBDMirrorKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileCephRBDMirror reconciles a cephRBDMirror object -type 
ReconcileCephRBDMirror struct { - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - client client.Client - scheme *runtime.Scheme - cephClusterSpec *cephv1.ClusterSpec - peers map[string]*peerSpec -} - -// peerSpec represents peer details -type peerSpec struct { - info *cephv1.PoolMirroringInfo - poolName string - direction string -} - -// Add creates a new cephRBDMirror Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. -func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - return &ReconcileCephRBDMirror{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - peers: make(map[string]*peerSpec), - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the cephRBDMirror CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephRBDMirror{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - // Watch all other resources - for _, t := range objectsToWatch { - err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephRBDMirror{}, - }, opcontroller.WatchPredicateForNonCRDObject(&cephv1.CephRBDMirror{TypeMeta: controllerTypeMeta}, mgr.GetScheme())) - if err != nil { - return err - } - } - - // Build Handler function to return the list of ceph rbd-mirror - // This is used by the watchers below - handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephRBDMirrorList{}, mgr.GetScheme()) - if err != nil { - return err - } - - // Watch for CephCluster Spec changes that we want to propagate to us - err = c.Watch(&source.Kind{Type: &cephv1.CephCluster{ - TypeMeta: metav1.TypeMeta{ - Kind: opcontroller.ClusterResource.Kind, - APIVersion: opcontroller.ClusterResource.APIVersion, - }, - }, - }, handler.EnqueueRequestsFromMapFunc(handlerFunc), opcontroller.WatchCephClusterPredicate()) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a cephRBDMirror object and makes changes based on the state read -// and what is in the cephRBDMirror.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
-func (r *ReconcileCephRBDMirror) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.FailedStatus) - logger.Errorf("failed to reconcile %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileCephRBDMirror) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the cephRBDMirror instance - cephRBDMirror := &cephv1.CephRBDMirror{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephRBDMirror) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("cephRBDMirror resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, errors.Wrap(err, "failed to get cephRBDMirror") - } - - // The CR was just created, initializing status fields - if cephRBDMirror.Status == nil { - updateStatus(r.client, request.NamespacedName, k8sutil.EmptyStatus) - } - - // validate the pool settings - if err := validateSpec(&cephRBDMirror.Spec); err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "invalid rbd-mirror CR %q spec", cephRBDMirror.Name) - } - - // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - logger.Debugf("CephCluster resource not ready in namespace %q, retrying in %q.", request.NamespacedName.Namespace, reconcileResponse.RequeueAfter.String()) - return reconcileResponse, nil - } - r.cephClusterSpec = &cephCluster.Spec - - // Populate clusterInfo - // Always populate it during each reconcile - r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to populate cluster info") - } - - // Populate CephVersion - daemon := string(opconfig.MonType) - currentCephVersion, err := cephclient.LeastUptodateDaemonVersion(r.context, r.clusterInfo, daemon) - if err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil - } - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to retrieve current ceph %q version", daemon) - } - r.clusterInfo.CephVersion = currentCephVersion - - // Add bootstrap peer if any - logger.Debug("reconciling ceph rbd mirror peers addition") - reconcileResponse, err = r.reconcileAddBoostrapPeer(cephRBDMirror, request.NamespacedName) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to add ceph rbd mirror peer") - } - - // CREATE/UPDATE - logger.Debug("reconciling ceph rbd mirror deployments") - reconcileResponse, err = r.reconcileCreateCephRBDMirror(cephRBDMirror) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to create ceph rbd mirror deployments") - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, k8sutil.ReadyStatus) - - // Return and do not requeue - logger.Debug("done reconciling ceph rbd mirror") - return 
reconcile.Result{}, nil - -} - -func (r *ReconcileCephRBDMirror) reconcileCreateCephRBDMirror(cephRBDMirror *cephv1.CephRBDMirror) (reconcile.Result, error) { - if r.cephClusterSpec.External.Enable { - _, err := opcontroller.ValidateCephVersionsBetweenLocalAndExternalClusters(r.context, r.clusterInfo) - if err != nil { - // This handles the case where the operator is running, the external cluster has been upgraded and a CR creation is called - // If that's a major version upgrade we fail, if it's a minor version, we continue, it's not ideal but not critical - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "refusing to run new crd") - } - } - - err := r.start(cephRBDMirror) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to start rbd mirror") - } - - return reconcile.Result{}, nil -} - -// updateStatus updates an object with a given status -func updateStatus(client client.Client, name types.NamespacedName, status string) { - rbdMirror := &cephv1.CephRBDMirror{} - err := client.Get(context.TODO(), name, rbdMirror) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephRBDMirror resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve rbd mirror %q to update status to %q. %v", name, status, err) - return - } - - if rbdMirror.Status == nil { - rbdMirror.Status = &cephv1.Status{} - } - - rbdMirror.Status.Phase = status - if err := reporting.UpdateStatus(client, rbdMirror); err != nil { - logger.Errorf("failed to set rbd mirror %q status to %q. %v", rbdMirror.Name, status, err) - return - } - logger.Debugf("rbd mirror %q status updated to %q", name, status) -} diff --git a/pkg/operator/ceph/cluster/rbd/controller_test.go b/pkg/operator/ceph/cluster/rbd/controller_test.go deleted file mode 100644 index 1675da907..000000000 --- a/pkg/operator/ceph/cluster/rbd/controller_test.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package file to manage a rook filesystem -package rbd - -import ( - "context" - "os" - "testing" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - cephAuthGetOrCreateKey = `{"key":"AQCvzWBeIV9lFRAAninzm+8XFxbSfTiPwoX50g=="}` - dummyVersionsRaw = ` - { - "mon": { - "ceph version 14.2.8 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 3 - } - }` -) - -func TestCephRBDMirrorController(t *testing.T) { - ctx := context.TODO() - var ( - name = "my-mirror" - namespace = "rook-ceph" - ) - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - // An rbd-mirror resource with metadata and spec. - rbdMirror := &cephv1.CephRBDMirror{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.RBDMirroringSpec{ - Count: 1, - }, - TypeMeta: controllerTypeMeta, - } - - // Objects to track in the fake client. - object := []runtime.Object{ - rbdMirror, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return cephAuthGetOrCreateKey, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - if args[0] == "mirror" && args[1] == "pool" && args[2] == "info" { - return `{"mode":"image","site_name":"39074576-5884-4ef3-8a4d-8a0c5ed33031","peers":[{"uuid":"4a6983c0-3c9d-40f5-b2a9-2334a4659827","direction":"rx-tx","site_name":"ocs","mirror_uuid":"","client_name":"client.rbd-mirror-peer"}]}`, nil - } - if args[0] == "mirror" && args[1] == "pool" && args[2] == "status" { - return `{"summary":{"health":"WARNING","daemon_health":"OK","image_health":"WARNING","states":{"unknown":1}}}`, nil - } - return "", nil - }, - } - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectStore{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileCephRBDMirror object with the scheme and fake client. - r := &ReconcileCephRBDMirror{client: cl, scheme: s, context: c} - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . 
- req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - logger.Info("STARTING PHASE 1") - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 1 DONE") - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephRBDMirror object with the scheme and fake client. - r = &ReconcileCephRBDMirror{client: cl, scheme: s, context: c} - logger.Info("STARTING PHASE 2") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 2 DONE") - - // - // TEST 3: - // - // SUCCESS! The CephCluster is ready - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileCephRBDMirror object with the scheme and fake client. 
- r = &ReconcileCephRBDMirror{ - client: cl, - scheme: s, - context: c, - peers: make(map[string]*peerSpec), - } - - logger.Info("STARTING PHASE 4") - - peerSecretName := "peer-secret" - rbdMirror.Spec.Peers.SecretNames = []string{peerSecretName} - err = r.client.Update(context.TODO(), rbdMirror) - assert.NoError(t, err) - res, err = r.Reconcile(ctx, req) - assert.Error(t, err) - assert.True(t, res.Requeue) - - logger.Info("STARTING PHASE 5") - bootstrapPeerToken := `eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==` //nolint:gosec // This is just a var name, not a real token - peerSecret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: peerSecretName, - Namespace: namespace, - }, - Data: map[string][]byte{"token": []byte(bootstrapPeerToken), "pool": []byte("goo")}, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, peerSecret, metav1.CreateOptions{}) - assert.NoError(t, err) - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, rbdMirror) - assert.NoError(t, err) - assert.Equal(t, "Ready", rbdMirror.Status.Phase, rbdMirror) -} diff --git a/pkg/operator/ceph/cluster/rbd/mirror.go b/pkg/operator/ceph/cluster/rbd/mirror.go deleted file mode 100644 index af3e01268..000000000 --- a/pkg/operator/ceph/cluster/rbd/mirror.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package rbd for mirroring -package rbd - -import ( - "context" - "fmt" - - "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -const ( - // AppName is the ceph rbd mirror application name - AppName = "rook-ceph-rbd-mirror" - // minimum amount of memory in MB to run the pod - cephRbdMirrorPodMinimumMemory uint64 = 512 -) - -var updateDeploymentAndWait = mon.UpdateCephDeploymentAndWait - -// Start begins the process of running rbd mirroring daemons. 
-func (r *ReconcileCephRBDMirror) start(cephRBDMirror *cephv1.CephRBDMirror) error { - ctx := context.TODO() - // Validate pod's memory if specified - err := controller.CheckPodMemory(cephv1.ResourcesKeyRBDMirror, cephRBDMirror.Spec.Resources, cephRbdMirrorPodMinimumMemory) - if err != nil { - return errors.Wrap(err, "error checking pod memory") - } - - logger.Infof("configure rbd-mirroring with %d workers", cephRBDMirror.Spec.Count) - - ownerInfo := k8sutil.NewOwnerInfo(cephRBDMirror, r.scheme) - daemonID := k8sutil.IndexToName(0) - resourceName := fmt.Sprintf("%s-%s", AppName, daemonID) - daemonConf := &daemonConfig{ - DaemonID: daemonID, - ResourceName: resourceName, - DataPathMap: config.NewDatalessDaemonDataPathMap(cephRBDMirror.Namespace, r.cephClusterSpec.DataDirHostPath), - ownerInfo: ownerInfo, - } - - _, err = r.generateKeyring(r.clusterInfo, daemonConf) - if err != nil { - return errors.Wrapf(err, "failed to generate keyring for %q", resourceName) - } - - // Start the deployment - d, err := r.makeDeployment(daemonConf, cephRBDMirror) - if err != nil { - return errors.Wrap(err, "failed to create rbd-mirror deployment") - } - - // Set owner ref to cephRBDMirror object - err = controllerutil.SetControllerReference(cephRBDMirror, d, r.scheme) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference for ceph rbd-mirror deployment %q", d.Name) - } - - // Set the deployment hash as an annotation - err = patch.DefaultAnnotator.SetLastAppliedAnnotation(d) - if err != nil { - return errors.Wrapf(err, "failed to set annotation for deployment %q", d.Name) - } - - if _, err := r.context.Clientset.AppsV1().Deployments(cephRBDMirror.Namespace).Create(ctx, d, metav1.CreateOptions{}); err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create %q deployment", resourceName) - } - logger.Infof("deployment for rbd-mirror %q already exists. updating if needed", resourceName) - - if err := updateDeploymentAndWait(r.context, r.clusterInfo, d, config.RbdMirrorType, daemonConf.DaemonID, r.cephClusterSpec.SkipUpgradeChecks, false); err != nil { - // fail could be an issue updating label selector (immutable), so try del and recreate - logger.Debugf("updateDeploymentAndWait failed for rbd-mirror %q. Attempting del-and-recreate. %v", resourceName, err) - err = r.context.Clientset.AppsV1().Deployments(cephRBDMirror.Namespace).Delete(ctx, cephRBDMirror.Name, metav1.DeleteOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to delete rbd-mirror %q during del-and-recreate update attempt", resourceName) - } - if _, err := r.context.Clientset.AppsV1().Deployments(cephRBDMirror.Namespace).Create(ctx, d, metav1.CreateOptions{}); err != nil { - return errors.Wrapf(err, "failed to recreate rbd-mirror deployment %q during del-and-recreate update attempt", resourceName) - } - } - } - - logger.Infof("%q deployment started", resourceName) - return nil -} diff --git a/pkg/operator/ceph/cluster/rbd/spec.go b/pkg/operator/ceph/cluster/rbd/spec.go deleted file mode 100644 index 7f47c16e4..000000000 --- a/pkg/operator/ceph/cluster/rbd/spec.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
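start() above follows the common create-or-update flow against the apps/v1 API: attempt to create the Deployment and treat IsAlreadyExists as the signal to update instead. A stripped-down sketch of just that flow, assuming the caller already built the clientset and the Deployment object; the real code additionally waits for the rollout and falls back to delete-and-recreate when the update is rejected.

```go
package example

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createOrUpdateDeployment tries to create the Deployment first and only
// updates it when the API server reports it already exists.
func createOrUpdateDeployment(ctx context.Context, clientset kubernetes.Interface, namespace string, d *appsv1.Deployment) error {
	if _, err := clientset.AppsV1().Deployments(namespace).Create(ctx, d, metav1.CreateOptions{}); err != nil {
		if !kerrors.IsAlreadyExists(err) {
			return fmt.Errorf("failed to create deployment %q: %w", d.Name, err)
		}
		// Already present: update in place. (The code above goes further and
		// deletes/recreates when the update fails, e.g. on an immutable
		// label-selector change.)
		if _, err := clientset.AppsV1().Deployments(namespace).Update(ctx, d, metav1.UpdateOptions{}); err != nil {
			return fmt.Errorf("failed to update deployment %q: %w", d.Name, err)
		}
	}
	return nil
}
```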
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rbd - -import ( - "fmt" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (r *ReconcileCephRBDMirror) makeDeployment(daemonConfig *daemonConfig, rbdMirror *cephv1.CephRBDMirror) (*apps.Deployment, error) { - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: daemonConfig.ResourceName, - Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, true), - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - r.makeChownInitContainer(daemonConfig, rbdMirror), - }, - Containers: []v1.Container{ - r.makeMirroringDaemonContainer(daemonConfig, rbdMirror), - }, - RestartPolicy: v1.RestartPolicyAlways, - Volumes: controller.DaemonVolumes(daemonConfig.DataPathMap, daemonConfig.ResourceName), - HostNetwork: r.cephClusterSpec.Network.IsHost(), - PriorityClassName: rbdMirror.Spec.PriorityClassName, - }, - } - - // If the log collector is enabled we add the side-car container - if r.cephClusterSpec.LogCollector.Enabled { - shareProcessNamespace := true - podSpec.Spec.ShareProcessNamespace = &shareProcessNamespace - podSpec.Spec.Containers = append(podSpec.Spec.Containers, *controller.LogCollectorContainer(fmt.Sprintf("ceph-client.rbd-mirror.%s", daemonConfig.DaemonID), r.clusterInfo.Namespace, *r.cephClusterSpec)) - } - - // Replace default unreachable node toleration - k8sutil.AddUnreachableNodeToleration(&podSpec.Spec) - rbdMirror.Spec.Annotations.ApplyToObjectMeta(&podSpec.ObjectMeta) - rbdMirror.Spec.Labels.ApplyToObjectMeta(&podSpec.ObjectMeta) - - if r.cephClusterSpec.Network.IsHost() { - podSpec.Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } else if r.cephClusterSpec.Network.IsMultus() { - if err := k8sutil.ApplyMultus(r.cephClusterSpec.Network, &podSpec.ObjectMeta); err != nil { - return nil, err - } - } - rbdMirror.Spec.Placement.ApplyToPodSpec(&podSpec.Spec) - - replicas := int32(rbdMirror.Spec.Count) - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: daemonConfig.ResourceName, - Namespace: rbdMirror.Namespace, - Annotations: rbdMirror.Spec.Annotations, - Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, true), - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: podSpec.Labels, - }, - Template: podSpec, - Replicas: &replicas, - }, - } - k8sutil.AddRookVersionLabelToDeployment(d) - controller.AddCephVersionLabelToDeployment(r.clusterInfo.CephVersion, d) - rbdMirror.Spec.Annotations.ApplyToObjectMeta(&d.ObjectMeta) - rbdMirror.Spec.Labels.ApplyToObjectMeta(&d.ObjectMeta) - - return d, nil -} - -func (r *ReconcileCephRBDMirror) makeChownInitContainer(daemonConfig *daemonConfig, rbdMirror *cephv1.CephRBDMirror) v1.Container { - return controller.ChownCephDataDirsInitContainer( - *daemonConfig.DataPathMap, - 
r.cephClusterSpec.CephVersion.Image, - controller.DaemonVolumeMounts(daemonConfig.DataPathMap, daemonConfig.ResourceName), - rbdMirror.Spec.Resources, - controller.PodSecurityContext(), - ) -} - -func (r *ReconcileCephRBDMirror) makeMirroringDaemonContainer(daemonConfig *daemonConfig, rbdMirror *cephv1.CephRBDMirror) v1.Container { - container := v1.Container{ - Name: "rbd-mirror", - Command: []string{ - "rbd-mirror", - }, - Args: append( - controller.DaemonFlags(r.clusterInfo, r.cephClusterSpec, daemonConfig.DaemonID), - "--foreground", - "--name="+fullDaemonName(daemonConfig.DaemonID), - ), - Image: r.cephClusterSpec.CephVersion.Image, - VolumeMounts: controller.DaemonVolumeMounts(daemonConfig.DataPathMap, daemonConfig.ResourceName), - Env: controller.DaemonEnvVars(r.cephClusterSpec.CephVersion.Image), - Resources: rbdMirror.Spec.Resources, - SecurityContext: controller.PodSecurityContext(), - WorkingDir: config.VarLogCephDir, - // TODO: - // Not implemented at this point since the socket name is '/run/ceph/ceph-client.rbd-mirror.a.1.94362516231272.asok' - // Also the command to run will be: - // ceph --admin-daemon /run/ceph/ceph-client.rbd-mirror.a.1.94362516231272.asok rbd mirror status - // LivenessProbe: controller.GenerateLivenessProbeExecDaemon(config.RbdMirrorType, daemonConfig.DaemonID), - } - - return container -} diff --git a/pkg/operator/ceph/cluster/rbd/spec_test.go b/pkg/operator/ceph/cluster/rbd/spec_test.go deleted file mode 100644 index 376cf74ec..000000000 --- a/pkg/operator/ceph/cluster/rbd/spec_test.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
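The log-collector wiring in makeDeployment above relies on two things: appending a side-car container and setting ShareProcessNamespace so the side-car can see, and signal, the daemon process. A minimal sketch of that mechanic with a placeholder busybox side-car rather than Rook's collector container:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
)

// addLogSidecar appends a generic log-tailing side-car to a pod spec and lets
// all containers share one PID namespace, mirroring the log-collector wiring
// above. The image, command and log path are illustrative placeholders.
func addLogSidecar(pod *corev1.PodSpec, logPath string) {
	share := true
	// Sharing the PID namespace lets the side-car send signals to the main
	// daemon process (useful for log rotation).
	pod.ShareProcessNamespace = &share
	pod.Containers = append(pod.Containers, corev1.Container{
		Name:    "log-collector",
		Image:   "busybox:1.36",
		Command: []string{"sh", "-c", "tail -n+1 -F " + logPath},
	})
}
```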
-*/ - -package rbd - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - - "github.com/rook/rook/pkg/operator/ceph/test" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestPodSpec(t *testing.T) { - namespace := "ns" - daemonConf := daemonConfig{ - DaemonID: "a", - ResourceName: "rook-ceph-rbd-mirror-a", - DataPathMap: config.NewDatalessDaemonDataPathMap("rook-ceph", "/var/lib/rook"), - } - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Spec: cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{ - Image: "quay.io/ceph/ceph:v15", - }, - }, - } - - rbdMirror := &cephv1.CephRBDMirror{ - ObjectMeta: metav1.ObjectMeta{ - Name: "a", - Namespace: namespace, - }, - Spec: cephv1.RBDMirroringSpec{ - Count: 1, - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(200.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(600.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(300.0, resource.BinarySI), - }, - }, - PriorityClassName: "my-priority-class", - }, - TypeMeta: controllerTypeMeta, - } - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.Nautilus, - } - s := scheme.Scheme - object := []runtime.Object{rbdMirror} - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - r := &ReconcileCephRBDMirror{client: cl, scheme: s, peers: make(map[string]*peerSpec)} - r.cephClusterSpec = &cephCluster.Spec - r.clusterInfo = clusterInfo - - d, err := r.makeDeployment(&daemonConf, rbdMirror) - assert.NoError(t, err) - assert.Equal(t, "rook-ceph-rbd-mirror-a", d.Name) - assert.Equal(t, 4, len(d.Spec.Template.Spec.Volumes)) - assert.Equal(t, 1, len(d.Spec.Template.Spec.Volumes[0].Projected.Sources)) - assert.Equal(t, 4, len(d.Spec.Template.Spec.Containers[0].VolumeMounts)) - - // Deployment should have Ceph labels - test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.RbdMirrorType, "a", AppName, "ns") - - podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) - podTemplate.RunFullSuite(config.RbdMirrorType, "a", AppName, "ns", "quay.io/ceph/ceph:myceph", - "200", "100", "600", "300", /* resources */ - "my-priority-class") -} diff --git a/pkg/operator/ceph/cluster/register_controllers.go b/pkg/operator/ceph/cluster/register_controllers.go deleted file mode 100644 index 93e1d4da6..000000000 --- a/pkg/operator/ceph/cluster/register_controllers.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
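TestPodSpec above builds the container resources with resource.NewQuantity; when writing similar specs by hand it is often clearer to use resource.MustParse, which accepts the familiar "100m"/"300Mi" strings. A small illustrative sketch (the values are arbitrary, not Rook defaults):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// exampleResources builds the same kind of ResourceRequirements the test above
// attaches to the rbd-mirror container, using MustParse for readability.
func exampleResources() corev1.ResourceRequirements {
	return corev1.ResourceRequirements{
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("200m"),
			corev1.ResourceMemory: resource.MustParse("600Mi"),
		},
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("100m"),
			corev1.ResourceMemory: resource.MustParse("300Mi"),
		},
	}
}
```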
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/crash" - "github.com/rook/rook/pkg/operator/ceph/cluster/rbd" - "github.com/rook/rook/pkg/operator/ceph/disruption/clusterdisruption" - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - "github.com/rook/rook/pkg/operator/ceph/disruption/machinedisruption" - "github.com/rook/rook/pkg/operator/ceph/disruption/machinelabel" - "github.com/rook/rook/pkg/operator/ceph/file" - "github.com/rook/rook/pkg/operator/ceph/file/mirror" - "github.com/rook/rook/pkg/operator/ceph/nfs" - "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/pkg/operator/ceph/object/realm" - objectuser "github.com/rook/rook/pkg/operator/ceph/object/user" - "github.com/rook/rook/pkg/operator/ceph/object/zone" - "github.com/rook/rook/pkg/operator/ceph/object/zonegroup" - "github.com/rook/rook/pkg/operator/ceph/pool" - - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -var ( - // EnableMachineDisruptionBudget checks whether machine disruption budget is enabled - EnableMachineDisruptionBudget bool -) - -// AddToManagerFuncsMaintenance is a list of functions to add all Controllers to the Manager (entrypoint for controller) -var AddToManagerFuncsMaintenance = []func(manager.Manager, *controllerconfig.Context) error{ - clusterdisruption.Add, -} - -// MachineDisruptionBudgetAddToManagerFuncs is a list of fencing related functions to add all Controllers to the Manager (entrypoint for controller) -var MachineDisruptionBudgetAddToManagerFuncs = []func(manager.Manager, *controllerconfig.Context) error{ - machinelabel.Add, - machinedisruption.Add, -} - -// AddToManagerFuncs is a list of functions to add all Controllers to the Manager (entrypoint for controller) -var AddToManagerFuncs = []func(manager.Manager, *clusterd.Context) error{ - crash.Add, - pool.Add, - objectuser.Add, - realm.Add, - zonegroup.Add, - zone.Add, - object.Add, - file.Add, - nfs.Add, - rbd.Add, - client.Add, - mirror.Add, -} - -// AddToManager adds all the registered controllers to the passed manager. 
-// each controller package will have an Add method listed in AddToManagerFuncs -// which will setup all the necessary watch -func AddToManager(m manager.Manager, c *controllerconfig.Context, clusterController *ClusterController) error { - if c == nil { - return errors.New("nil context passed") - } - - // Run CephCluster CR - if err := Add(m, c.ClusterdContext, clusterController); err != nil { - return err - } - - // Add Ceph child CR controllers - for _, f := range AddToManagerFuncs { - if err := f(m, c.ClusterdContext); err != nil { - return err - } - } - - // Add maintenance controllers - for _, f := range AddToManagerFuncsMaintenance { - if err := f(m, c); err != nil { - return err - } - } - - // If machine disruption budget is enabled let's add the controllers - if EnableMachineDisruptionBudget { - for _, f := range MachineDisruptionBudgetAddToManagerFuncs { - if err := f(m, c); err != nil { - return err - } - } - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/utils.go b/pkg/operator/ceph/cluster/utils.go deleted file mode 100644 index 65ba8307b..000000000 --- a/pkg/operator/ceph/cluster/utils.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. -package cluster - -import ( - "context" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// populateConfigOverrideConfigMap creates the "rook-config-override" config map -// Its content allows modifying Ceph configuration flags -func populateConfigOverrideConfigMap(clusterdContext *clusterd.Context, namespace string, ownerInfo *k8sutil.OwnerInfo) error { - ctx := context.TODO() - placeholderConfig := map[string]string{ - k8sutil.ConfigOverrideVal: "", - } - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: k8sutil.ConfigOverrideName, - Namespace: namespace, - }, - Data: placeholderConfig, - } - - err := ownerInfo.SetControllerReference(cm) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to override configmap %q", cm.Name) - } - _, err = clusterdContext.Clientset.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create override configmap %s", namespace) - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/version.go b/pkg/operator/ceph/cluster/version.go deleted file mode 100644 index 8e436d888..000000000 --- a/pkg/operator/ceph/cluster/version.go +++ /dev/null @@ -1,242 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
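register_controllers.go above uses the usual operator pattern of collecting each package's Add function in a slice and looping over it when wiring up the manager. A boiled-down sketch of that registration loop, with the controller packages left as a hypothetical comment:

```go
package example

import (
	"github.com/pkg/errors"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// addFuncs is the same "list of entrypoints" idea as AddToManagerFuncs above:
// every controller package contributes one Add function.
var addFuncs = []func(manager.Manager) error{
	// poolcontroller.Add, objectcontroller.Add, ... (hypothetical packages)
}

// addAllControllers wires every registered controller into the manager and
// stops at the first failure, exactly like AddToManager above.
func addAllControllers(m manager.Manager) error {
	for _, add := range addFuncs {
		if err := add(m); err != nil {
			return errors.Wrap(err, "failed to add controller to manager")
		}
	}
	return nil
}
```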
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage a Ceph cluster. -package cluster - -import ( - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - daemonclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/controller" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil/cmdreporter" -) - -func (c *ClusterController) detectAndValidateCephVersion(cluster *cluster) (*cephver.CephVersion, bool, error) { - version, err := cluster.detectCephVersion(c.rookImage, cluster.Spec.CephVersion.Image, detectCephVersionTimeout) - if err != nil { - return nil, false, err - } - - logger.Info("validating ceph version from provided image") - if err := cluster.validateCephVersion(version); err != nil { - return nil, cluster.isUpgrade, err - } - - // Update ceph version field in cluster object status - c.updateClusterCephVersion(cluster.Spec.CephVersion.Image, *version) - - return version, cluster.isUpgrade, nil -} - -func (c *cluster) printOverallCephVersion() { - versions, err := daemonclient.GetAllCephDaemonVersions(c.context, c.ClusterInfo) - if err != nil { - logger.Errorf("failed to get ceph daemons versions. %v", err) - return - } - - if len(versions.Overall) == 1 { - for v := range versions.Overall { - version, err := cephver.ExtractCephVersion(v) - if err != nil { - logger.Errorf("failed to extract ceph version. %v", err) - return - } - vv := *version - logger.Infof("successfully upgraded cluster to version: %q", vv.String()) - } - } else { - // This shouldn't happen, but let's log just in case - logger.Warningf("upgrade orchestration completed but somehow we still have more than one Ceph version running. %v:", versions.Overall) - } -} - -// This function compare the Ceph spec image and the cluster running version -// It returns true if the image is different and false if identical -func diffImageSpecAndClusterRunningVersion(imageSpecVersion cephver.CephVersion, runningVersions cephv1.CephDaemonsVersions) (bool, error) { - numberOfCephVersions := len(runningVersions.Overall) - if numberOfCephVersions == 0 { - // let's return immediately - return false, errors.Errorf("no 'overall' section in the ceph versions. %+v", runningVersions.Overall) - } - - if numberOfCephVersions > 1 { - // let's return immediately - logger.Warningf("it looks like we have more than one ceph version running. triggering upgrade. %+v:", runningVersions.Overall) - return true, nil - } - - if numberOfCephVersions == 1 { - for v := range runningVersions.Overall { - version, err := cephver.ExtractCephVersion(v) - if err != nil { - logger.Errorf("failed to extract ceph version. 
%v", err) - return false, err - } - clusterRunningVersion := *version - - // If this is the same version - if cephver.IsIdentical(clusterRunningVersion, imageSpecVersion) { - logger.Debugf("both cluster and image spec versions are identical, doing nothing %s", imageSpecVersion.String()) - return false, nil - } - - if cephver.IsSuperior(imageSpecVersion, clusterRunningVersion) { - logger.Infof("image spec version %s is higher than the running cluster version %s, upgrading", imageSpecVersion.String(), clusterRunningVersion.String()) - return true, nil - } - - if cephver.IsInferior(imageSpecVersion, clusterRunningVersion) { - return true, errors.Errorf("image spec version %s is lower than the running cluster version %s, downgrading is not supported", imageSpecVersion.String(), clusterRunningVersion.String()) - } - } - } - - return false, nil -} - -// detectCephVersion loads the ceph version from the image and checks that it meets the version requirements to -// run in the cluster -func (c *cluster) detectCephVersion(rookImage, cephImage string, timeout time.Duration) (*cephver.CephVersion, error) { - logger.Infof("detecting the ceph image version for image %s...", cephImage) - versionReporter, err := cmdreporter.New( - c.context.Clientset, c.ownerInfo, - detectVersionName, detectVersionName, c.Namespace, - []string{"ceph"}, []string{"--version"}, - rookImage, cephImage) - if err != nil { - return nil, errors.Wrap(err, "failed to set up ceph version job") - } - - job := versionReporter.Job() - job.Spec.Template.Spec.ServiceAccountName = "rook-ceph-cmd-reporter" - - // Apply the same placement for the ceph version detection as the mon daemons except for PodAntiAffinity - cephv1.GetMonPlacement(c.Spec.Placement).ApplyToPodSpec(&job.Spec.Template.Spec) - job.Spec.Template.Spec.Affinity.PodAntiAffinity = nil - - stdout, stderr, retcode, err := versionReporter.Run(timeout) - if err != nil { - return nil, errors.Wrap(err, "failed to complete ceph version job") - } - if retcode != 0 { - return nil, errors.Errorf(`ceph version job returned failure with retcode %d. 
- stdout: %s - stderr: %s`, retcode, stdout, stderr) - } - - version, err := cephver.ExtractCephVersion(stdout) - if err != nil { - return nil, errors.Wrap(err, "failed to extract ceph version") - } - logger.Infof("detected ceph image version: %q", version) - return version, nil -} - -func (c *cluster) validateCephVersion(version *cephver.CephVersion) error { - if !c.Spec.External.Enable { - if !version.IsAtLeast(cephver.Minimum) { - return errors.Errorf("the version does not meet the minimum version %q", cephver.Minimum.String()) - } - - if !version.Supported() { - if !c.Spec.CephVersion.AllowUnsupported { - return errors.Errorf("allowUnsupported must be set to true to run with this version %q", version.String()) - } - logger.Warningf("unsupported ceph version detected: %q, pursuing", version) - } - - if version.Unsupported() { - logger.Errorf("UNSUPPORTED: ceph version %q detected, it is recommended to rollback to the previous pin-point stable release, pursuing anyways", version) - } - } - - // The following tries to determine if the operator can proceed with an upgrade because we come from an OnAdd() call - // If the cluster was unhealthy and someone injected a new image version, an upgrade was triggered but failed because the cluster is not healthy - // Then after this, if the operator gets restarted we are not able to fail if the cluster is not healthy, the following tries to determine the - // state we are in and if we should upgrade or not - - // Try to load clusterInfo so we can compare the running version with the one from the spec image - clusterInfo, _, _, err := mon.LoadClusterInfo(c.context, c.Namespace) - if err == nil { - // Write connection info (ceph config file and keyring) for ceph commands - err = mon.WriteConnectionConfig(c.context, clusterInfo) - if err != nil { - logger.Errorf("failed to write config. attempting to continue. %v", err) - } - } - - if !clusterInfo.IsInitialized(false) { - // If not initialized, this is likely a new cluster so there is nothing to do - logger.Debug("cluster not initialized, nothing to validate") - return nil - } - - clusterInfo.CephVersion = *version - if c.Spec.External.Enable && c.Spec.CephVersion.Image != "" { - c.ClusterInfo.CephVersion, err = controller.ValidateCephVersionsBetweenLocalAndExternalClusters(c.context, c.ClusterInfo) - if err != nil { - return errors.Wrap(err, "failed to validate ceph version between external and local") - } - } - - // On external cluster setup, if we don't bootstrap any resources in the Kubernetes cluster then - // there is no need to validate the Ceph image further - if c.Spec.External.Enable && c.Spec.CephVersion.Image == "" { - logger.Debug("no spec image specified on external cluster, not validating Ceph version.") - return nil - } - - // Get cluster running versions - versions, err := daemonclient.GetAllCephDaemonVersions(c.context, c.ClusterInfo) - if err != nil { - logger.Errorf("failed to get ceph daemons versions, this typically happens during the first cluster initialization. %v", err) - return nil - } - - runningVersions := *versions - differentImages, err := diffImageSpecAndClusterRunningVersion(*version, runningVersions) - if err != nil { - logger.Errorf("failed to determine if we should upgrade or not. 
%v", err) - // we shouldn't block the orchestration if we can't determine the version of the image spec, we proceed anyway in best effort - // we won't be able to check if there is an update or not and what to do, so we don't check the cluster status either - // This will happen if someone uses ceph/daemon:latest-master for instance - return nil - } - - if differentImages { - // If the image version changed let's make sure we can safely upgrade - // check ceph's status, if not healthy we fail - cephHealthy := daemonclient.IsCephHealthy(c.context, c.ClusterInfo) - if !cephHealthy { - if c.Spec.SkipUpgradeChecks { - logger.Warning("ceph is not healthy but SkipUpgradeChecks is set, forcing upgrade.") - } else { - return errors.Errorf("ceph status in namespace %s is not healthy, refusing to upgrade. Either fix the health issue or force an update by setting skipUpgradeChecks to true in the cluster CR", c.Namespace) - } - } - // This is an upgrade - logger.Infof("upgrading ceph cluster to %q", version.String()) - c.isUpgrade = true - } - - return nil -} diff --git a/pkg/operator/ceph/cluster/version_test.go b/pkg/operator/ceph/cluster/version_test.go deleted file mode 100755 index 3e56461ef..000000000 --- a/pkg/operator/ceph/cluster/version_test.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cluster to manage Kubernetes storage. 
-package cluster - -import ( - "encoding/json" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - testop "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" -) - -func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) { - - // 1st test - fakeImageVersion := cephver.Nautilus - fakeRunningVersions := []byte(` - { - "mon": { - "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 1, - "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2 - } - }`) - var dummyRunningVersions cephv1.CephDaemonsVersions - err := json.Unmarshal([]byte(fakeRunningVersions), &dummyRunningVersions) - assert.NoError(t, err) - - m, err := diffImageSpecAndClusterRunningVersion(fakeImageVersion, dummyRunningVersions) - assert.Error(t, err) // Overall is absent - assert.False(t, m) - - // 2nd test - more than 1 version means we should upgrade - fakeRunningVersions = []byte(` - { - "overall": { - "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 1, - "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2 - } - }`) - var dummyRunningVersions2 cephv1.CephDaemonsVersions - err = json.Unmarshal([]byte(fakeRunningVersions), &dummyRunningVersions2) - assert.NoError(t, err) - - m, err = diffImageSpecAndClusterRunningVersion(fakeImageVersion, dummyRunningVersions2) - assert.NoError(t, err) - assert.True(t, m) - - // 3rd test - spec version is lower than running cluster? what's going on? - fakeRunningVersions = []byte(` - { - "overall": { - "ceph version 15.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) octopus (stable)": 2 - } - }`) - var dummyRunningVersions3 cephv1.CephDaemonsVersions - err = json.Unmarshal([]byte(fakeRunningVersions), &dummyRunningVersions3) - assert.NoError(t, err) - - m, err = diffImageSpecAndClusterRunningVersion(fakeImageVersion, dummyRunningVersions3) - assert.Error(t, err) - assert.True(t, m) - - // 4 test - spec version is higher than running cluster --> we upgrade - fakeImageVersion = cephver.Pacific - fakeRunningVersions = []byte(` - { - "overall": { - "ceph version 15.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) octopus (stable)": 2 - } - }`) - var dummyRunningVersions4 cephv1.CephDaemonsVersions - err = json.Unmarshal([]byte(fakeRunningVersions), &dummyRunningVersions4) - assert.NoError(t, err) - - m, err = diffImageSpecAndClusterRunningVersion(fakeImageVersion, dummyRunningVersions4) - assert.NoError(t, err) - assert.True(t, m) - - // 5 test - spec version and running cluster versions are identical --> we upgrade - fakeImageVersion = cephver.CephVersion{Major: 16, Minor: 2, Extra: 2, - CommitID: "3a54b2b6d167d4a2a19e003a705696d4fe619afc"} - fakeRunningVersions = []byte(` - { - "overall": { - "ceph version 16.2.2 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) pacific (stable)": 2 - } - }`) - var dummyRunningVersions5 cephv1.CephDaemonsVersions - err = json.Unmarshal([]byte(fakeRunningVersions), &dummyRunningVersions5) - assert.NoError(t, err) - - m, err = diffImageSpecAndClusterRunningVersion(fakeImageVersion, dummyRunningVersions5) - assert.NoError(t, err) - assert.False(t, m) - - // 6 test - spec version and running cluster have different commit ID - fakeImageVersion = cephver.CephVersion{Major: 16, Minor: 2, Extra: 11, Build: 139, - CommitID: "5c0dc966af809fd1d429ec7bac48962a746af243"} - fakeRunningVersions = []byte(` - { - 
"overall": { - "ceph version 16.2.11-139.el8cp (3a54b2b6d167d4a2a19e003a705696d4fe619afc) pacific (stable)": 2 - } - }`) - var dummyRunningVersions6 cephv1.CephDaemonsVersions - err = json.Unmarshal([]byte(fakeRunningVersions), &dummyRunningVersions6) - assert.NoError(t, err) - - m, err = diffImageSpecAndClusterRunningVersion(fakeImageVersion, dummyRunningVersions6) - assert.NoError(t, err) - assert.True(t, m) - - // 7 test - spec version and running cluster have same commit ID - fakeImageVersion = cephver.CephVersion{Major: 16, Minor: 2, Extra: 11, Build: 139, - CommitID: "3a54b2b6d167d4a2a19e003a705696d4fe619afc"} - fakeRunningVersions = []byte(` - { - "overall": { - "ceph version 16.2.11-139.el8cp (3a54b2b6d167d4a2a19e003a705696d4fe619afc) pacific (stable)": 2 - } - }`) - var dummyRunningVersions7 cephv1.CephDaemonsVersions - err = json.Unmarshal([]byte(fakeRunningVersions), &dummyRunningVersions7) - assert.NoError(t, err) - - m, err = diffImageSpecAndClusterRunningVersion(fakeImageVersion, dummyRunningVersions7) - assert.NoError(t, err) - assert.False(t, m) -} - -func TestMinVersion(t *testing.T) { - c := testSpec(t) - c.Spec.CephVersion.AllowUnsupported = true - - // All versions less than 14.2.5 are invalid - v := &cephver.CephVersion{Major: 13, Minor: 2, Extra: 3} - assert.Error(t, c.validateCephVersion(v)) - v = &cephver.CephVersion{Major: 14, Minor: 2, Extra: 1} - assert.Error(t, c.validateCephVersion(v)) - v = &cephver.CephVersion{Major: 14} - assert.Error(t, c.validateCephVersion(v)) - - // All versions at least 14.2.5 are valid - v = &cephver.CephVersion{Major: 14, Minor: 2, Extra: 5} - assert.NoError(t, c.validateCephVersion(v)) - v = &cephver.CephVersion{Major: 15} - assert.NoError(t, c.validateCephVersion(v)) -} - -func TestSupportedVersion(t *testing.T) { - c := testSpec(t) - - // Supported versions are valid - v := &cephver.CephVersion{Major: 14, Minor: 2, Extra: 12} - assert.NoError(t, c.validateCephVersion(v)) - - // Supported versions are valid - v = &cephver.CephVersion{Major: 15, Minor: 2, Extra: 5} - assert.NoError(t, c.validateCephVersion(v)) - - // Supported versions are valid - v = &cephver.CephVersion{Major: 16, Minor: 2, Extra: 0} - assert.NoError(t, c.validateCephVersion(v)) - - // Unsupported versions are not valid - v = &cephver.CephVersion{Major: 17, Minor: 2, Extra: 0} - assert.Error(t, c.validateCephVersion(v)) - - // Unsupported versions are now valid - c.Spec.CephVersion.AllowUnsupported = true - assert.NoError(t, c.validateCephVersion(v)) -} - -func testSpec(t *testing.T) *cluster { - clientset := testop.New(t, 1) - context := &clusterd.Context{ - Clientset: clientset, - } - return &cluster{Spec: &cephv1.ClusterSpec{}, context: context} -} diff --git a/pkg/operator/ceph/cluster/watcher.go b/pkg/operator/ceph/cluster/watcher.go deleted file mode 100644 index e6b3ffac8..000000000 --- a/pkg/operator/ceph/cluster/watcher.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package cluster to manage a Ceph cluster. -package cluster - -import ( - "context" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - discoverDaemon "github.com/rook/rook/pkg/daemon/discover" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// clientCluster struct contains a client to interact with Kubernetes object -// as well as the NamespacedName (used in requests) -type clientCluster struct { - client client.Client - namespace string - context *clusterd.Context -} - -var nodesCheckedForReconcile = sets.NewString() - -func newClientCluster(client client.Client, namespace string, context *clusterd.Context) *clientCluster { - return &clientCluster{ - client: client, - namespace: namespace, - context: context, - } -} - -func checkStorageForNode(cluster *cephv1.CephCluster) bool { - if !cluster.Spec.Storage.UseAllNodes && len(cluster.Spec.Storage.Nodes) == 0 && len(cluster.Spec.Storage.StorageClassDeviceSets) == 0 { - logger.Debugf("node watcher: useAllNodes is set to false and no nodes storageClassDevicesets or volumeSources are specified in cluster %q, skipping", cluster.Namespace) - return false - } - return true -} - -// onK8sNodeAdd is triggered when a node is added in the Kubernetes cluster -func (c *clientCluster) onK8sNode(object runtime.Object) bool { - node, ok := object.(*v1.Node) - if !ok { - return false - } - // skip reconcile if node is already checked in a previous reconcile - if nodesCheckedForReconcile.Has(node.Name) { - return false - } - // Get CephCluster - cluster := c.getCephCluster() - - if !k8sutil.GetNodeSchedulable(*node) { - logger.Debugf("node watcher: skipping cluster update. added node %q is unschedulable", node.Labels[v1.LabelHostname]) - return false - } - - if !k8sutil.NodeIsTolerable(*node, cephv1.GetOSDPlacement(cluster.Spec.Placement).Tolerations, false) { - logger.Debugf("node watcher: node %q is not tolerable for cluster %q, skipping", node.Name, cluster.Namespace) - return false - } - - if !checkStorageForNode(cluster) { - nodesCheckedForReconcile.Insert(node.Name) - return false - } - - // Too strict? this replaces clusterInfo == nil - if cluster.Status.Phase != cephv1.ConditionReady { - logger.Debugf("node watcher: cluster %q is not ready. skipping orchestration", cluster.Namespace) - return false - } - - logger.Debugf("node %q is ready, checking if it can run OSDs", node.Name) - nodesCheckedForReconcile.Insert(node.Name) - valid, _ := k8sutil.ValidNode(*node, cephv1.GetOSDPlacement(cluster.Spec.Placement)) - if valid { - nodeName := node.Name - hostname, ok := node.Labels[v1.LabelHostname] - if ok && hostname != "" { - nodeName = hostname - } - // Make sure we can call Ceph properly - // Is the node in the CRUSH map already? 
- // If so we don't need to reconcile, this is done to avoid double reconcile on operator restart - // Assume the admin key since we are watching for node status to create OSDs - clusterInfo := cephclient.AdminClusterInfo(cluster.Namespace) - osds, err := cephclient.GetOSDOnHost(c.context, clusterInfo, nodeName) - if err != nil { - // If it fails, this might be due to the the operator just starting and catching an add event for that node - logger.Debugf("failed to get osds on node %q, assume reconcile is necessary", nodeName) - return true - } - - // Reconcile if there are no OSDs in the CRUSH map and if the host does not exist in the CRUSH map. - if osds == "" { - logger.Infof("node watcher: adding node %q to cluster %q", node.Labels[v1.LabelHostname], cluster.Namespace) - return true - } - - // This is Debug level because the node receives frequent updates and this will pollute the logs - logger.Debugf("node watcher: node %q is already an OSD node with %q", nodeName, osds) - } - return false -} - -// onDeviceCMUpdate is trigger when the hot plug config map is updated -func (c *clientCluster) onDeviceCMUpdate(oldObj, newObj runtime.Object) bool { - oldCm, ok := oldObj.(*v1.ConfigMap) - if !ok { - return false - } - logger.Debugf("hot-plug cm watcher: onDeviceCMUpdate old device cm: %+v", oldCm) - - newCm, ok := newObj.(*v1.ConfigMap) - if !ok { - return false - } - logger.Debugf("hot-plug cm watcher: onDeviceCMUpdate new device cm: %+v", newCm) - - oldDevStr, ok := oldCm.Data[discoverDaemon.LocalDiskCMData] - if !ok { - logger.Warning("hot-plug cm watcher: unexpected old configmap data") - return false - } - - newDevStr, ok := newCm.Data[discoverDaemon.LocalDiskCMData] - if !ok { - logger.Warning("hot-plug cm watcher: unexpected new configmap data") - return false - } - - devicesEqual, err := discoverDaemon.DeviceListsEqual(oldDevStr, newDevStr) - if err != nil { - logger.Warningf("hot-plug cm watcher: failed to compare device lists. %v", err) - return false - } - - if devicesEqual { - logger.Debug("hot-plug cm watcher: device lists are equal. skipping orchestration") - return false - } - - // Get CephCluster - cluster := c.getCephCluster() - - if cluster.Status.Phase != cephv1.ConditionReady { - logger.Debugf("hot-plug cm watcher: cluster %q is not ready. skipping orchestration.", cluster.Namespace) - return false - } - - if len(cluster.Spec.Storage.StorageClassDeviceSets) > 0 { - logger.Info("hot-plug cm watcher: skip orchestration on device config map update for OSDs on PVC") - return false - } - - logger.Infof("hot-plug cm watcher: running orchestration for namespace %q after device change", cluster.Namespace) - return true -} - -func (c *clientCluster) getCephCluster() *cephv1.CephCluster { - clusterList := &cephv1.CephClusterList{} - - err := c.client.List(context.TODO(), clusterList, client.InNamespace(c.namespace)) - if err != nil { - logger.Debugf("%q: failed to fetch CephCluster %v", controllerName, err) - return &cephv1.CephCluster{} - } - if len(clusterList.Items) == 0 { - logger.Debugf("%q: no CephCluster resource found in namespace %q", controllerName, c.namespace) - return &cephv1.CephCluster{} - } - - return &clusterList.Items[0] -} diff --git a/pkg/operator/ceph/cluster/watcher_test.go b/pkg/operator/ceph/cluster/watcher_test.go deleted file mode 100644 index 17f1834df..000000000 --- a/pkg/operator/ceph/cluster/watcher_test.go +++ /dev/null @@ -1,223 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
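onK8sNode above is essentially a predicate gating reconciliation: skip nodes already seen, skip unschedulable or non-tolerable nodes, and skip everything while the cluster is not ready. A condensed sketch of that gate with standalone inputs; the real code calls k8sutil and cephclient helpers for the schedulability, toleration and CRUSH-map checks:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// seenNodes plays the role of nodesCheckedForReconcile above.
var seenNodes = sets.NewString()

// shouldReconcileNode condenses the node-watcher checks into one predicate.
// clusterReady and nodeIsTolerable stand in for the CephCluster phase check
// and the placement/toleration check done by the real code.
func shouldReconcileNode(node *corev1.Node, clusterReady bool, nodeIsTolerable func(corev1.Node) bool) bool {
	if seenNodes.Has(node.Name) {
		return false // already handled in an earlier reconcile
	}
	if node.Spec.Unschedulable {
		return false // cordoned nodes never get OSDs
	}
	if !nodeIsTolerable(*node) {
		return false
	}
	if !clusterReady {
		return false // don't orchestrate against a cluster that isn't ready
	}
	seenNodes.Insert(node.Name)
	return true
}
```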
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "os" - "testing" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func getFakeClient(obj ...runtime.Object) client.Client { - // Register operator types with the runtime scheme. - scheme := scheme.Scheme - scheme.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) - client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(obj...).Build() - return client -} - -func fakeCluster(ns string) *cephv1.CephCluster { - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: ns, - Namespace: ns, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - }, - Spec: cephv1.ClusterSpec{ - Storage: cephv1.StorageScopeSpec{}, - }, - } - return cephCluster -} - -func TestCheckStorageForNode(t *testing.T) { - ns := "rook-ceph" - cephCluster := fakeCluster(ns) - - assert.False(t, checkStorageForNode(cephCluster)) - - cephCluster.Spec.Storage.UseAllNodes = true - assert.True(t, checkStorageForNode(cephCluster)) - - fakeNodes := []cephv1.Node{ - { - Name: "nodeA", - }, - } - cephCluster.Spec.Storage.Nodes = fakeNodes - assert.True(t, checkStorageForNode(cephCluster)) - - fakeDeviceSets := []cephv1.StorageClassDeviceSet{ - { - Name: "DeviceSet1", - }, - } - cephCluster.Spec.Storage.StorageClassDeviceSets = fakeDeviceSets - assert.True(t, checkStorageForNode(cephCluster)) -} - -func TestOnK8sNode(t *testing.T) { - ns := "rook-ceph" - cephCluster := fakeCluster(ns) - objects := []runtime.Object{ - cephCluster, - } - // Create a fake client to mock API calls. - client := getFakeClient(objects...) 
- - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - return "", errors.New("failed to get osd list on host") - } - clientCluster := newClientCluster(client, ns, &clusterd.Context{ - Executor: executor, - }) - - node := &corev1.Node{ - Spec: corev1.NodeSpec{ - Unschedulable: false, - }, - Status: corev1.NodeStatus{ - Conditions: []corev1.NodeCondition{ - { - Status: corev1.ConditionStatus(corev1.ConditionTrue), - Type: corev1.NodeConditionType(corev1.NodeReady), - }, - }, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "fakenode", - }, - } - - // node will reconcile - fakeNodes := []cephv1.Node{ - { - Name: "nodeA", - }, - } - fakeDeviceSets := []cephv1.StorageClassDeviceSet{ - { - Name: "DeviceSet1", - }, - } - cephCluster.Spec.Storage.Nodes = fakeNodes - cephCluster.Spec.Storage.StorageClassDeviceSets = fakeDeviceSets - cephCluster.Spec.Storage.UseAllNodes = true - cephCluster.Status.Phase = k8sutil.ReadyStatus - client = getFakeClient(objects...) - clientCluster.client = client - b := clientCluster.onK8sNode(node) - assert.True(t, b) - - // node will not reconcile - b = clientCluster.onK8sNode(node) - assert.False(t, b) -} - -func TestOnDeviceCMUpdate(t *testing.T) { - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - service := &corev1.Service{} - ns := "rook-ceph" - cephCluster := fakeCluster(ns) - objects := []runtime.Object{ - cephCluster, - } - - // Create a fake client to mock API calls. - client := getFakeClient(objects...) - clientCluster := newClientCluster(client, ns, &clusterd.Context{}) - - // Dummy object - b := clientCluster.onDeviceCMUpdate(service, service) - assert.False(t, b) - - // No Data in the cm - oldCM := &corev1.ConfigMap{} - newCM := &corev1.ConfigMap{} - b = clientCluster.onDeviceCMUpdate(oldCM, newCM) - assert.False(t, b) - - devices := []byte(` - [ - { - "name": "dm-0", - "parent": ".", - "hasChildren": false, - "devLinks": "/dev/disk/by-id/dm-name-ceph--bee31cdd--e899--4f9a--9e77--df71cfad66f9-osd--data--b5df7900--0cf0--4b1a--a337--7b57c9f0111b/dev/disk/by-id/dm-uuid-LVM-B10SBHeAy5yF6l2OM3p3EqTQbUAYc6JI63n8ZZPTmxRHXTJHmQ4YTAIBCJqY931Z", - "size": 31138512896, - "uuid": "aafee853-1b8d-4a15-83a9-17825728befc", - "serial": "", - "type": "lvm", - "rotational": true, - "readOnly": false, - "Partitions": [ - { - "Name": "ceph--bee31cdd--e899--4f9a--9e77--df71cfad66f9-osd--data--b5df7900--0cf0--4b1a--a337--7b57c9f0111b", - "Size": 0, - "Label": "", - "Filesystem": "" - } - ], - "filesystem": "ceph_bluestore", - "vendor": "", - "model": "", - "wwn": "", - "wwnVendorExtension": "", - "empty": false, - "real-path": "/dev/mapper/ceph--bee31cdd--e899--4f9a--9e77--df71cfad66f9-osd--data--b5df7900--0cf0--4b1a--a337--7b57c9f0111b" - } - ]`) - - oldData := make(map[string]string, 1) - oldData["devices"] = "[{}]" - oldCM.Data = oldData - - newData := make(map[string]string, 1) - newData["devices"] = string(devices) - newCM.Data = newData - - // now there is a diff but cluster is not ready - b = clientCluster.onDeviceCMUpdate(oldCM, newCM) - assert.False(t, b) - - // finally the cluster is ready and we can reconcile - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - client = getFakeClient(objects...) 
- clientCluster.client = client - b = clientCluster.onDeviceCMUpdate(oldCM, newCM) - assert.True(t, b) -} diff --git a/pkg/operator/ceph/config/config.go b/pkg/operator/ceph/config/config.go deleted file mode 100644 index e3c54f9fc..000000000 --- a/pkg/operator/ceph/config/config.go +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config provides methods for generating the Ceph config for a Ceph cluster and for -// producing a "ceph.conf" compatible file from the config as well as Ceph command line-compatible -// flags. -package config - -import ( - "fmt" - "path" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-config") - -const ( - // MonType defines the mon DaemonType - MonType = "mon" - - // MgrType defines the mgr DaemonType - MgrType = "mgr" - - // OsdType defines the osd DaemonType - OsdType = "osd" - - // MdsType defines the mds DaemonType - MdsType = "mds" - - // RgwType defines the rgw DaemonType - RgwType = "rgw" - - // RbdMirrorType defines the rbd-mirror DaemonType - RbdMirrorType = "rbd-mirror" - - // FilesystemMirrorType defines the fs-mirror DaemonType - FilesystemMirrorType = "fs-mirror" - - // CrashType defines the crash collector DaemonType - CrashType = "crashcollector" - - // CephUser is the Linux Ceph username - CephUser = "ceph" - - // CephGroup is the Linux Ceph groupname - CephGroup = "ceph" -) - -var ( - // VarLibCephDir is simply "/var/lib/ceph". It is made overwritable only for unit tests where it - // may be needed to send data intended for /var/lib/ceph to a temporary test dir. - VarLibCephDir = "/var/lib/ceph" - - // EtcCephDir is simply "/etc/ceph". It is made overwritable only for unit tests where it - // may be needed to send data intended for /etc/ceph to a temporary test dir. - EtcCephDir = "/etc/ceph" - - // VarLogCephDir defines Ceph logging directory. It is made overwritable only for unit tests where it - // may be needed to send data intended for /var/log/ceph to a temporary test dir. - VarLogCephDir = "/var/log/ceph" - - // VarLibCephCrashDir defines Ceph crash reports directory. - VarLibCephCrashDir = path.Join(VarLibCephDir, "crash") -) - -// normalizeKey converts a key in any format to a key with underscores. -// -// The internal representation of Ceph config keys uses underscores only, where Ceph supports both -// spaces, underscores, and hyphens. This is so that Rook can properly match and override keys even -// when they are specified as "some config key" in one section, "some_config_key" in another -// section, and "some-config-key" in yet another section. 
-func normalizeKey(key string) string { - return strings.Replace(strings.Replace(key, " ", "_", -1), "-", "_", -1) -} - -// NewFlag returns the key-value pair in the format of a Ceph command line-compatible flag. -func NewFlag(key, value string) string { - // A flag is a normalized key with underscores replaced by dashes. - // "debug default" ~normalize~> "debug_default" ~to~flag~> "debug-default" - n := normalizeKey(key) - f := strings.Replace(n, "_", "-", -1) - return fmt.Sprintf("--%s=%s", f, value) -} - -// SetOrRemoveDefaultConfigs sets Rook's desired default configs in the centralized monitor database. This -// cannot be called before at least one monitor is established. -// Also, legacy options will be removed -func SetOrRemoveDefaultConfigs( - context *clusterd.Context, - clusterInfo *cephclient.ClusterInfo, - clusterSpec cephv1.ClusterSpec, -) error { - // ceph.conf is never used. All configurations are made in the centralized mon config database, - // or they are specified on the commandline when daemons are called. - monStore := GetMonStore(context, clusterInfo) - - if err := monStore.SetAll(DefaultCentralizedConfigs(clusterInfo.CephVersion)...); err != nil { - return errors.Wrapf(err, "failed to apply default Ceph configurations") - } - - // When enabled the collector will logrotate logs from files - if clusterSpec.LogCollector.Enabled { - // Override "log file" for existing clusters since it is empty - logOptions := []Option{ - configOverride("global", "log to file", "true"), - } - - if err := monStore.SetAll(logOptions...); err != nil { - return errors.Wrapf(err, "failed to apply logging configuration for log collector") - } - // If the log collector is disabled we do not log to file since we collect nothing - } else { - logOptions := []Option{ - configOverride("global", "log to file", "false"), - } - - if err := monStore.SetAll(logOptions...); err != nil { - return errors.Wrapf(err, "failed to apply logging configuration") - } - } - - if err := monStore.SetAll(DefaultLegacyConfigs()...); err != nil { - return errors.Wrapf(err, "failed to apply legacy config overrides") - } - - // Apply Multus if needed - if clusterSpec.Network.IsMultus() { - logger.Info("configuring ceph network(s) with multus") - cephNetworks, err := generateNetworkSettings(context, clusterInfo.Namespace, clusterSpec.Network.Selectors) - if err != nil { - return errors.Wrap(err, "failed to generate network settings") - } - - // Apply ceph network settings to the mon config store - if err := monStore.SetAll(cephNetworks...); err != nil { - return errors.Wrap(err, "failed to network config overrides") - } - } - - // This section will remove any previously configured option(s) from the mon centralized store - // This is useful for scenarios where options are not needed anymore and we just want to reset to internal's default - // On upgrade, the flag will be removed - if err := monStore.DeleteAll(LegacyConfigs()...); err != nil { - return errors.Wrap(err, "failed to remove legacy options") - } - - return nil -} diff --git a/pkg/operator/ceph/config/config_test.go b/pkg/operator/ceph/config/config_test.go deleted file mode 100644 index 89a4b477f..000000000 --- a/pkg/operator/ceph/config/config_test.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
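normalizeKey and NewFlag above exist so that the spellings "some config key", "some-config-key" and "some_config_key" all collapse to one canonical command-line flag. A tiny standalone demo; the two helpers are restated from the code above so the snippet runs on its own:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeKey and newFlag reproduce the two helpers above for illustration.
func normalizeKey(key string) string {
	return strings.Replace(strings.Replace(key, " ", "_", -1), "-", "_", -1)
}

func newFlag(key, value string) string {
	f := strings.Replace(normalizeKey(key), "_", "-", -1)
	return fmt.Sprintf("--%s=%s", f, value)
}

func main() {
	for _, k := range []string{"debug default", "debug-default", "debug_default"} {
		// All three spellings end up as the same command-line flag.
		fmt.Println(newFlag(k, "5")) // --debug-default=5
	}
}
```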
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewFlag(t *testing.T) { - assert.Equal(t, NewFlag("k", ""), "--k=") - assert.Equal(t, NewFlag("a-key", "a"), "--a-key=a") - assert.Equal(t, NewFlag("b_key", "b"), "--b-key=b") - assert.Equal(t, NewFlag("c key", "c"), "--c-key=c") - assert.Equal(t, NewFlag("quotes", "\"quoted\""), "--quotes=\"quoted\"") -} diff --git a/pkg/operator/ceph/config/datapath.go b/pkg/operator/ceph/config/datapath.go deleted file mode 100644 index 224c53ba5..000000000 --- a/pkg/operator/ceph/config/datapath.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config provides a shared way of referring to data storage locations for Ceph Daemons, -// including both the in-container location and on-host location as well as whether the data is -// persisted to the host. -package config - -import ( - "path" -) - -// A DataPathMap is a struct which contains information about where Ceph daemon data is stored in -// containers and whether the data should be persisted to the host. If it is persisted to the host, -// directory on the host where the specific daemon's data is stored is given. -type DataPathMap struct { - // HostDataDir should be set to the path on the host where the specific daemon's data is stored. - // If this is empty, the daemon does not persist data to the host, but data may still be shared - // between containers in a pod via an empty dir. - HostDataDir string - - // ContainerDataDir should be set to the path in the container where the specific daemon's data - // is stored. If this is empty, the daemon does not store data at all, even in the container, - // and data is not shared between container in a pod via empty dir. - ContainerDataDir string - - // HostLogAndCrashDir dir represents Ceph's logging and crash dump dir on the host. - // Logs are stored in the "log" subdir and crash dumps in the "crash" subdir of this directory. - // If this is empty logs are not persisted to the host. - // The log dir is always /var/log/ceph. If logs are not persisted to the - // host, logs are not shared between containers via empty dir or any other mechanism. - HostLogAndCrashDir string -} - -// NewStatefulDaemonDataPathMap returns a new DataPathMap for a daemon which requires a persistent -// config (mons, osds). daemonDataDirHostRelativePath is the path relative to the dataDirHostPath -// where the daemon's data is stored on the host's filesystem. 
Daemons which use a DataPathMap -// created by this method will only have access to their own data and not the entire dataDirHostPath -// which may include data from other daemons. -func NewStatefulDaemonDataPathMap( - dataDirHostPath, daemonDataDirHostRelativePath string, - daemonType, daemonID, namespace string, -) *DataPathMap { - return &DataPathMap{ - HostDataDir: path.Join(dataDirHostPath, daemonDataDirHostRelativePath), - ContainerDataDir: cephDataDir(daemonType, daemonID), - HostLogAndCrashDir: path.Join(dataDirHostPath, namespace), - } -} - -// NewStatelessDaemonDataPathMap returns a new DataPathMap for a daemon which does not persist data -// to the host (mgrs, mdses, rgws) -func NewStatelessDaemonDataPathMap( - daemonType, daemonID, namespace, dataDirHostPath string, -) *DataPathMap { - return &DataPathMap{ - HostDataDir: "", - ContainerDataDir: cephDataDir(daemonType, daemonID), - HostLogAndCrashDir: path.Join(dataDirHostPath, namespace), - } -} - -// NewDatalessDaemonDataPathMap returns a new DataPathMap for a daemon which does not utilize a data -// dir in the container as the mon, mgr, osd, mds, and rgw daemons do. -func NewDatalessDaemonDataPathMap(namespace, dataDirHostPath string) *DataPathMap { - return &DataPathMap{ - HostDataDir: dataDirHostPath, - ContainerDataDir: "", - HostLogAndCrashDir: path.Join(dataDirHostPath, namespace), - } -} - -func cephDataDir(daemonType, daemonID string) string { - // daemons' default data dirs are: /var/lib/ceph//ceph- - return path.Join(VarLibCephDir, daemonType, "ceph-"+daemonID) -} - -// ContainerCrashDir returns the directory of the crash collector -func (d *DataPathMap) ContainerCrashDir() string { - return VarLibCephCrashDir -} - -// ContainerLogDir returns the directory of the Ceph logs -func (d *DataPathMap) ContainerLogDir() string { - return VarLogCephDir -} - -// HostLogDir returns the directory path on the host for Ceph logs -func (d *DataPathMap) HostLogDir() string { - return path.Join(d.HostLogAndCrashDir, "log") -} - -// HostCrashDir returns the directory path on the host for Ceph crashes -func (d *DataPathMap) HostCrashDir() string { - return path.Join(d.HostLogAndCrashDir, "crash") -} diff --git a/pkg/operator/ceph/config/datapath_test.go b/pkg/operator/ceph/config/datapath_test.go deleted file mode 100644 index 96bd31f68..000000000 --- a/pkg/operator/ceph/config/datapath_test.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewStatefulDaemonDataPathMap(t *testing.T) { - // mon - d := NewStatefulDaemonDataPathMap("/var/lib/rook", "/mon-a/data", MonType, "a", "rook-ceph") - assert.Equal(t, &DataPathMap{ - HostDataDir: "/var/lib/rook/mon-a/data", - ContainerDataDir: "/var/lib/ceph/mon/ceph-a", - HostLogAndCrashDir: "/var/lib/rook/rook-ceph", - }, d) - - // osd - d = NewStatefulDaemonDataPathMap("/var/lib/rook/", "osd0/", OsdType, "0", "rook-ceph") - assert.Equal(t, &DataPathMap{ - HostDataDir: "/var/lib/rook/osd0", - ContainerDataDir: "/var/lib/ceph/osd/ceph-0", - HostLogAndCrashDir: "/var/lib/rook/rook-ceph", - }, d) -} - -func TestNewStatelessDaemonDataPathMap(t *testing.T) { - // mgr - d := NewStatelessDaemonDataPathMap(MgrType, "a", "rook-ceph", "/var/lib/rook") - assert.Equal(t, &DataPathMap{ - HostDataDir: "", - ContainerDataDir: "/var/lib/ceph/mgr/ceph-a", - HostLogAndCrashDir: "/var/lib/rook/rook-ceph", - }, d) - - // mds - d = NewStatelessDaemonDataPathMap(MdsType, "myfs.a", "rook-ceph", "/var/lib/rook") - assert.Equal(t, &DataPathMap{ - HostDataDir: "", - ContainerDataDir: "/var/lib/ceph/mds/ceph-myfs.a", - HostLogAndCrashDir: "/var/lib/rook/rook-ceph", - }, d) - - // rgw - d = NewStatelessDaemonDataPathMap(RgwType, "objstore", "rook-ceph", "/var/lib/rook") - assert.Equal(t, &DataPathMap{ - HostDataDir: "", - ContainerDataDir: "/var/lib/ceph/rgw/ceph-objstore", - HostLogAndCrashDir: "/var/lib/rook/rook-ceph", - }, d) -} - -func TestNewDatalessDaemonDataPathMap(t *testing.T) { - // rbdmirror - d := NewDatalessDaemonDataPathMap("rook-ceph", "/var/lib/rook") - assert.Equal(t, &DataPathMap{ - HostDataDir: "/var/lib/rook", - ContainerDataDir: "", - HostLogAndCrashDir: "/var/lib/rook/rook-ceph", - }, d) -} diff --git a/pkg/operator/ceph/config/defaults.go b/pkg/operator/ceph/config/defaults.go deleted file mode 100644 index 5fafba817..000000000 --- a/pkg/operator/ceph/config/defaults.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config provides default configurations which Rook will set in Ceph clusters. -package config - -import ( - "github.com/rook/rook/pkg/operator/ceph/version" -) - -// DefaultFlags returns the default configuration flags Rook will set on the command line for all -// calls to Ceph daemons and tools. Values specified here will not be able to be overridden using -// the mon's central KV store, and that is (and should be) by intent. -func DefaultFlags(fsid, mountedKeyringPath string) []string { - flags := []string{ - // fsid unnecessary but is a safety to make sure daemons can only connect to their cluster - NewFlag("fsid", fsid), - NewFlag("keyring", mountedKeyringPath), - } - - flags = append(flags, LoggingFlags()...) - flags = append(flags, StoredMonHostEnvVarFlags()...) 
- - return flags -} - -// makes it possible to be slightly less verbose to create a ConfigOverride here -func configOverride(who, option, value string) Option { - return Option{Who: who, Option: option, Value: value} -} - -func LoggingFlags() []string { - return []string{ - // For containers, we're expected to log everything to stderr - NewFlag("log-to-stderr", "true"), - NewFlag("err-to-stderr", "true"), - NewFlag("mon-cluster-log-to-stderr", "true"), - // differentiate debug text from audit text, and the space after 'debug' is critical - NewFlag("log-stderr-prefix", "debug "), - NewFlag("default-log-to-file", "false"), - NewFlag("default-mon-cluster-log-to-file", "false"), - } -} - -// DefaultCentralizedConfigs returns the default configuration options Rook will set in Ceph's -// centralized config store. -func DefaultCentralizedConfigs(cephVersion version.CephVersion) []Option { - overrides := []Option{ - configOverride("global", "mon allow pool delete", "true"), - configOverride("global", "mon cluster log file", ""), - } - - // We disable "bluestore warn on legacy statfs" - // This setting appeared on 14.2.2, so if detected we disable the warning - // As of 14.2.5 (https://github.com/rook/rook/issues/3539#issuecomment-531287051), Ceph will disable this flag by default so there is no need to apply it - if cephVersion.IsAtLeast(version.CephVersion{Major: 14, Minor: 2, Extra: 2}) && version.IsInferior(cephVersion, version.CephVersion{Major: 14, Minor: 2, Extra: 5}) { - overrides = append(overrides, []Option{ - configOverride("global", "bluestore warn on legacy statfs", "false"), - }...) - } - - // For Pacific - if cephVersion.IsAtLeastPacific() { - overrides = append(overrides, []Option{ - configOverride("global", "mon allow pool size one", "true"), - }...) - } - - // Every release before Quincy will enable PG auto repair on Bluestore OSDs - if !cephVersion.IsAtLeastQuincy() { - overrides = append(overrides, []Option{ - configOverride("global", "osd scrub auto repair", "true"), - }...) - } - - return overrides -} - -// DefaultLegacyConfigs need to be added to the Ceph config file until the integration tests can be -// made to override these options for the Ceph clusters it creates. -func DefaultLegacyConfigs() []Option { - overrides := []Option{ - // TODO: move this under LegacyConfigs() when FlexVolume is no longer supported - configOverride("global", "rbd_default_features", "3"), - } - return overrides -} - -// LegacyConfigs represents old configuration that were applied to a cluster and not needed anymore -func LegacyConfigs() []Option { - return []Option{ - {Who: "global", Option: "log file"}, - } -} diff --git a/pkg/operator/ceph/config/keyring/admin.go b/pkg/operator/ceph/config/keyring/admin.go deleted file mode 100644 index fa793f6a7..000000000 --- a/pkg/operator/ceph/config/keyring/admin.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package keyring - -import ( - "fmt" - - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" -) - -const ( - adminKeyringResourceName = "rook-ceph-admin" - crashCollectorKeyringResourceName = "rook-ceph-crash-collector" - - adminKeyringTemplate = ` -[client.admin] - key = %s - caps mds = "allow *" - caps mon = "allow *" - caps osd = "allow *" - caps mgr = "allow *" -` -) - -// An AdminStore is a specialized derivative of the SecretStore helper for storing the Ceph cluster -// admin keyring as a Kubernetes secret. -type AdminStore struct { - secretStore *SecretStore -} - -// Admin returns the special Admin keyring store type. -func (s *SecretStore) Admin() *AdminStore { - return &AdminStore{secretStore: s} -} - -// CreateOrUpdate creates or updates the admin keyring secret with cluster information. -func (a *AdminStore) CreateOrUpdate(c *cephclient.ClusterInfo) error { - keyring := fmt.Sprintf(adminKeyringTemplate, c.CephCred.Secret) - return a.secretStore.CreateOrUpdate(adminKeyringResourceName, keyring) -} diff --git a/pkg/operator/ceph/config/keyring/admin_test.go b/pkg/operator/ceph/config/keyring/admin_test.go deleted file mode 100644 index beee9a2e7..000000000 --- a/pkg/operator/ceph/config/keyring/admin_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package keyring - -import ( - "context" - "fmt" - "path" - "testing" - - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestAdminKeyringStore(t *testing.T) { - ctxt := context.TODO() - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - } - ns := "test-ns" - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - clusterInfo := clienttest.CreateTestClusterInfo(1) - clusterInfo.Namespace = ns - k := GetSecretStore(ctx, clusterInfo, ownerInfo) - - assertKeyringData := func(expectedKeyring string) { - s, e := clientset.CoreV1().Secrets(ns).Get(ctxt, "rook-ceph-admin-keyring", metav1.GetOptions{}) - assert.NoError(t, e) - assert.Equal(t, 1, len(s.StringData)) - assert.Equal(t, expectedKeyring, s.StringData["keyring"]) - assert.Equal(t, k8sutil.RookType, string(s.Type)) - } - - // create key - clusterInfo.CephCred.Secret = "adminsecretkey" - err := k.Admin().CreateOrUpdate(clusterInfo) - assert.NoError(t, err) - assertKeyringData(fmt.Sprintf(adminKeyringTemplate, "adminsecretkey")) - - // update key - clusterInfo.CephCred.Secret = "differentsecretkey" - err = k.Admin().CreateOrUpdate(clusterInfo) - assert.NoError(t, err) - assertKeyringData(fmt.Sprintf(adminKeyringTemplate, "differentsecretkey")) -} - -func TestAdminVolumeAndMount(t *testing.T) { - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - clusterInfo := clienttest.CreateTestClusterInfo(1) - s := GetSecretStore(ctx, clusterInfo, ownerInfo) - - clusterInfo.CephCred.Secret = "adminsecretkey" - err := s.Admin().CreateOrUpdate(clusterInfo) - assert.NoError(t, err) - - v := Volume().Admin() - m := VolumeMount().Admin() - // Test that the secret will make it into containers with the appropriate filename at the - // location where it is expected. - assert.Equal(t, v.Name, m.Name) - assert.Equal(t, "rook-ceph-admin-keyring", v.VolumeSource.Secret.SecretName) - assert.Equal(t, VolumeMount().AdminKeyringFilePath(), path.Join(m.MountPath, keyringFileName)) -} diff --git a/pkg/operator/ceph/config/keyring/store.go b/pkg/operator/ceph/config/keyring/store.go deleted file mode 100644 index 799bca9bb..000000000 --- a/pkg/operator/ceph/config/keyring/store.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package keyring provides methods for accessing keyrings for Ceph daemons stored securely in -// Kubernetes secrets. It also provides methods for creating keyrings with desired permissions which -// are stored persistently and a special subset of methods for the Ceph admin keyring. 
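// Illustrative sketch only, not part of the files removed above: it shows one way the
// keyring SecretStore described in the package comment can be wired together by a daemon
// controller (generate a key, persist it as a "<resource>-keyring" secret, mount it).
// The package name, the resource name "rook-ceph-example", the user "client.example" and
// its caps are hypothetical; import paths are the pre-removal paths from the diff headers.
package keyringexample

import (
	"fmt"

	"github.com/rook/rook/pkg/clusterd"
	cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
	"github.com/rook/rook/pkg/operator/ceph/config/keyring"
	"github.com/rook/rook/pkg/operator/k8sutil"
	v1 "k8s.io/api/core/v1"
)

// provisionKeyring generates a Ceph auth key for a daemon user, stores the keyring file
// content as a Kubernetes secret, and returns the volume/mount pair that exposes it to a pod.
func provisionKeyring(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, ownerInfo *k8sutil.OwnerInfo) (v1.Volume, v1.VolumeMount, error) {
	store := keyring.GetSecretStore(ctx, clusterInfo, ownerInfo)

	// Ask the mons to get or create the key for this user with the given (hypothetical) caps.
	key, err := store.GenerateKey("client.example", []string{"mon", "allow r"})
	if err != nil {
		return v1.Volume{}, v1.VolumeMount{}, err
	}

	// Persist the keyring as the secret "rook-ceph-example-keyring".
	resource := "rook-ceph-example"
	kr := fmt.Sprintf("[client.example]\n\tkey = %s\n", key)
	if err := store.CreateOrUpdate(resource, kr); err != nil {
		return v1.Volume{}, v1.VolumeMount{}, err
	}

	// Volume and mount builders reference the same secret name, so containers see the
	// keyring file at keyring.VolumeMount().KeyringFilePath().
	return keyring.Volume().Resource(resource), keyring.VolumeMount().Resource(resource), nil
}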
-package keyring - -import ( - "context" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-cfg-keyring") - -const ( - keyringFileName = "keyring" -) - -// SecretStore is a helper to store Ceph daemon keyrings as Kubernetes secrets. -type SecretStore struct { - context *clusterd.Context - clusterInfo *client.ClusterInfo - ownerInfo *k8sutil.OwnerInfo -} - -// GetSecretStore returns a new SecretStore struct. -func GetSecretStore(context *clusterd.Context, clusterInfo *client.ClusterInfo, ownerInfo *k8sutil.OwnerInfo) *SecretStore { - return &SecretStore{ - context: context, - clusterInfo: clusterInfo, - ownerInfo: ownerInfo, - } -} - -func keyringSecretName(resourceName string) string { - return resourceName + "-keyring" // all keyrings named by suffixing keyring to the resource name -} - -// GenerateKey generates a key for a Ceph user with the given access permissions. It returns the key -// generated on success. Ceph will always return the most up-to-date key for a daemon, and the key -// usually does not change. -func (k *SecretStore) GenerateKey(user string, access []string) (string, error) { - // get-or-create-key for the user account - key, err := client.AuthGetOrCreateKey(k.context, k.clusterInfo, user, access) - if err != nil { - logger.Infof("Error getting or creating key for %q. "+ - "Attempting to update capabilities in case the user already exists. %v", user, err) - uErr := client.AuthUpdateCaps(k.context, k.clusterInfo, user, access) - if uErr != nil { - return "", errors.Wrapf(err, "failed to get, create, or update auth key for %s", user) - } - key, uErr = client.AuthGetKey(k.context, k.clusterInfo, user) - if uErr != nil { - return "", errors.Wrapf(err, "failed to get key after updating existing auth capabilities for %s", user) - } - } - return key, nil -} - -// CreateOrUpdate creates or updates the keyring secret for the resource with the keyring specified. -// WARNING: Do not use "rook-ceph-admin" as the resource name; conflicts with the AdminStore. -func (k *SecretStore) CreateOrUpdate(resourceName string, keyring string) error { - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: keyringSecretName(resourceName), - Namespace: k.clusterInfo.Namespace, - }, - StringData: map[string]string{ - keyringFileName: keyring, - }, - Type: k8sutil.RookType, - } - err := k.ownerInfo.SetControllerReference(secret) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to keyring secret %q", secret.Name) - } - - return k.CreateSecret(secret) -} - -// Delete deletes the keyring secret for the resource. -func (k *SecretStore) Delete(resourceName string) error { - ctx := context.TODO() - secretName := keyringSecretName(resourceName) - err := k.context.Clientset.CoreV1().Secrets(k.clusterInfo.Namespace).Delete(ctx, secretName, metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - logger.Warningf("failed to delete keyring secret for %q. user may need to delete the resource manually. 
%v", secretName, err) - } - - return nil -} - -// CreateSecret creates or update a kubernetes secret -func (k *SecretStore) CreateSecret(secret *v1.Secret) error { - ctx := context.TODO() - secretName := secret.ObjectMeta.Name - _, err := k.context.Clientset.CoreV1().Secrets(k.clusterInfo.Namespace).Get(ctx, secretName, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debugf("creating secret for %s", secretName) - if _, err := k.context.Clientset.CoreV1().Secrets(k.clusterInfo.Namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil { - return errors.Wrapf(err, "failed to create secret for %s", secretName) - } - return nil - } - return errors.Wrapf(err, "failed to get secret for %s", secretName) - } - - logger.Debugf("updating secret for %s", secretName) - if _, err := k.context.Clientset.CoreV1().Secrets(k.clusterInfo.Namespace).Update(ctx, secret, metav1.UpdateOptions{}); err != nil { - return errors.Wrapf(err, "failed to update secret for %s", secretName) - } - return nil -} diff --git a/pkg/operator/ceph/config/keyring/store_test.go b/pkg/operator/ceph/config/keyring/store_test.go deleted file mode 100644 index 603a1e069..000000000 --- a/pkg/operator/ceph/config/keyring/store_test.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package keyring - -import ( - "context" - "path" - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestGenerateKey(t *testing.T) { - clientset := testop.New(t, 1) - var generateKey = "" - var failGenerateKey = false - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if failGenerateKey { - return "", errors.New("test error") - } - return "{\"key\": \"" + generateKey + "\"}", nil - }, - } - ctx := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - } - ns := "rook-ceph" - ownerInfo := k8sutil.OwnerInfo{} - s := GetSecretStore(ctx, &cephclient.ClusterInfo{Namespace: ns}, &ownerInfo) - - generateKey = "generatedsecretkey" - failGenerateKey = false - k, e := s.GenerateKey("testuser", []string{"test", "access"}) - assert.NoError(t, e) - assert.Equal(t, "generatedsecretkey", k) - - generateKey = "differentsecretkey" - failGenerateKey = false - k, e = s.GenerateKey("testuser", []string{"test", "access"}) - assert.NoError(t, e) - assert.Equal(t, "differentsecretkey", k) - - // make sure error on fail - generateKey = "failgeneratekey" - failGenerateKey = true - _, e = s.GenerateKey("newuser", []string{"new", "access"}) - assert.Error(t, e) -} - -func TestKeyringStore(t *testing.T) { - ctxt := context.TODO() - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - ns := "rook-ceph" - k := GetSecretStore(ctx, &cephclient.ClusterInfo{Namespace: ns}, ownerInfo) - - assertKeyringData := func(keyringName, expectedKeyring string) { - s, e := clientset.CoreV1().Secrets(ns).Get(ctxt, keyringName, metav1.GetOptions{}) - assert.NoError(t, e) - assert.Equal(t, 1, len(s.StringData)) - assert.Equal(t, expectedKeyring, s.StringData["keyring"]) - assert.Equal(t, k8sutil.RookType, string(s.Type)) - } - - assertDoesNotExist := func(keyringName string) { - _, e := clientset.CoreV1().Secrets(ns).Get(ctxt, keyringName, metav1.GetOptions{}) - assert.True(t, kerrors.IsNotFound(e)) - } - - // create first key - err := k.CreateOrUpdate("test-resource", "qwertyuiop") - assert.NoError(t, err) - assertKeyringData("test-resource-keyring", "qwertyuiop") - - // create second key - err = k.CreateOrUpdate("second-resource", "asdfghjkl") - assert.NoError(t, err) - assertKeyringData("test-resource-keyring", "qwertyuiop") - assertKeyringData("second-resource-keyring", "asdfghjkl") - - // update a key - err = k.CreateOrUpdate("second-resource", "lkjhgfdsa") - assert.NoError(t, err) - assertKeyringData("test-resource-keyring", "qwertyuiop") - assertKeyringData("second-resource-keyring", "lkjhgfdsa") - - // delete a key - err = k.Delete("test-resource") - assert.NoError(t, err) - assertDoesNotExist("test-resource-keyring") - assertKeyringData("second-resource-keyring", "lkjhgfdsa") -} - -func TestResourceVolumeAndMount(t *testing.T) { - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - } - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - k := GetSecretStore(ctx, &cephclient.ClusterInfo{Namespace: "ns"}, ownerInfo) - err := 
k.CreateOrUpdate("test-resource", "qwertyuiop") - assert.NoError(t, err) - err = k.CreateOrUpdate("second-resource", "asdfgyhujkl") - assert.NoError(t, err) - - v := Volume().Resource("test-resource") - m := VolumeMount().Resource("test-resource") - // Test that the secret will make it into containers with the appropriate filename at the - // location where it is expected. - assert.Equal(t, v.Name, m.Name) - assert.Equal(t, "test-resource-keyring", v.VolumeSource.Secret.SecretName) - assert.Equal(t, VolumeMount().KeyringFilePath(), path.Join(m.MountPath, keyringFileName)) -} diff --git a/pkg/operator/ceph/config/keyring/volume.go b/pkg/operator/ceph/config/keyring/volume.go deleted file mode 100644 index 67e1b89ab..000000000 --- a/pkg/operator/ceph/config/keyring/volume.go +++ /dev/null @@ -1,96 +0,0 @@ -package keyring - -import ( - "path" - - v1 "k8s.io/api/core/v1" -) - -const ( - keyringDir = "/etc/ceph/keyring-store/" - - // admin keyring path must be different from keyring path so that the two keyrings can be - // mounted independently - adminKeyringDir = "/etc/ceph/admin-keyring-store/" - crashCollectorKeyringDir = "/etc/ceph/crash-collector-keyring-store/" -) - -// VolumeBuilder is a helper for creating Kubernetes pod volumes with content sourced by keyrings -// stored in the SecretStore. -type VolumeBuilder struct{} - -// VolumeMountBuilder is a helper for creating Kubernetes container volume mounts that mount the -// keyring content from VolumeBuilder volumes. -type VolumeMountBuilder struct{} - -// Volume returns a VolumeBuilder. -func Volume() *VolumeBuilder { return &VolumeBuilder{} } - -// Resource returns a Kubernetes pod volume whose content is sourced by the keyring created for the -// resource using a SecretStore. -func (v *VolumeBuilder) Resource(resourceName string) v1.Volume { - return v1.Volume{ - Name: keyringSecretName(resourceName), - VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{ - SecretName: keyringSecretName(resourceName), - }}, - } -} - -// Admin returns a kubernetes pod volume whose content is sourced by the SecretStore admin keyring. -func (v *VolumeBuilder) Admin() v1.Volume { - return v.Resource(adminKeyringResourceName) -} - -// CrashCollector returns a kubernetes pod volume whose content is sourced by the SecretStore crash collector keyring. -func (v *VolumeBuilder) CrashCollector() v1.Volume { - return v.Resource(crashCollectorKeyringResourceName) -} - -// VolumeMount returns a VolumeMountBuilder. -func VolumeMount() *VolumeMountBuilder { return &VolumeMountBuilder{} } - -// Resource returns a Kubernetes container volume mount that mounts the content from the matching -// VolumeBuilder Resource volume for the same resource. -func (*VolumeMountBuilder) Resource(resourceName string) v1.VolumeMount { - return v1.VolumeMount{ - Name: keyringSecretName(resourceName), - ReadOnly: true, // should be no reason to write to the keyring in pods, so enforce this - MountPath: keyringDir, - } -} - -// Admin returns a Kubernetes container volume mount that mounts the content from the matching -// VolumeBuilder Admin volume. -func (*VolumeMountBuilder) Admin() v1.VolumeMount { - return v1.VolumeMount{ - Name: keyringSecretName(adminKeyringResourceName), - ReadOnly: true, // should be no reason to write to the keyring in pods, so enforce this - MountPath: adminKeyringDir, - } -} - -// CrashCollector returns a Kubernetes container volume mount that mounts the content from the matching -// VolumeBuilder Crash Collector volume. 
-func (*VolumeMountBuilder) CrashCollector() v1.VolumeMount { - return v1.VolumeMount{ - Name: keyringSecretName(crashCollectorKeyringResourceName), - ReadOnly: true, // should be no reason to write to the keyring in pods, so enforce this - MountPath: crashCollectorKeyringDir, - } -} - -// KeyringFilePath returns the full path to the regular keyring file within a container. -func (*VolumeMountBuilder) KeyringFilePath() string { - return path.Join(keyringDir, keyringFileName) -} - -// AdminKeyringFilePath returns the full path to the admin keyring file within a container. -func (*VolumeMountBuilder) AdminKeyringFilePath() string { - return path.Join(adminKeyringDir, keyringFileName) -} - -// CrashCollectorKeyringFilePath returns the full path to the admin keyring file within a container. -func (*VolumeMountBuilder) CrashCollectorKeyringFilePath() string { - return path.Join(crashCollectorKeyringDir, keyringFileName) -} diff --git a/pkg/operator/ceph/config/livenessprobe.go b/pkg/operator/ceph/config/livenessprobe.go deleted file mode 100644 index 0c6df3019..000000000 --- a/pkg/operator/ceph/config/livenessprobe.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config allows a ceph config file to be stored in Kubernetes and mounted as volumes into -// Ceph daemon containers. -package config - -import ( - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - v1 "k8s.io/api/core/v1" -) - -type fn func(cephv1.CephClusterHealthCheckSpec) *v1.Probe - -// ConfigureLivenessProbe returns the desired liveness probe for a given daemon -func ConfigureLivenessProbe(daemon rook.KeyType, container v1.Container, healthCheck cephv1.CephClusterHealthCheckSpec) v1.Container { - // Map of functions - probeFnMap := map[rook.KeyType]fn{ - cephv1.KeyMon: cephv1.GetMonLivenessProbe, - cephv1.KeyMgr: cephv1.GetMgrLivenessProbe, - cephv1.KeyOSD: cephv1.GetOSDLivenessProbe, - cephv1.KeyMds: cephv1.GetMdsLivenessProbe, - } - - if _, ok := healthCheck.LivenessProbe[daemon]; ok { - if healthCheck.LivenessProbe[daemon].Disabled { - container.LivenessProbe = nil - } else { - probe := probeFnMap[daemon](healthCheck) - // If the spec value is not empty, let's apply it along with default when some fields are not specified - if probe != nil { - // Set the liveness probe on the container to overwrite the default probe created by Rook - container.LivenessProbe = GetLivenessProbeWithDefaults(probe, container.LivenessProbe) - } - } - } - - return container -} - -func GetLivenessProbeWithDefaults(desiredProbe, currentProbe *v1.Probe) *v1.Probe { - newProbe := *desiredProbe - - // Do not replace the handler with the previous one! 
- // On the first iteration, the handler appears empty and is then replaced by whatever first daemon value comes in - // e.g: [env -i sh -c ceph --admin-daemon /run/ceph/ceph-mon.b.asok mon_status] - meaning mon b was the first picked in the list of mons - // On the second iteration the value of mon b remains, since the pointer has been allocated - // This means the handler is not empty anymore and not replaced by the current one which it should - // - // Let's always force the default handler, there is no reason to change it anyway since the underlying content is generated based on the daemon's name - // so we can not make it generic via the spec - newProbe.Handler = currentProbe.Handler - - // If the user has not specified thresholds and timeouts, set them to the same values as - // in the default liveness probe created by Rook. - if newProbe.FailureThreshold == 0 { - newProbe.FailureThreshold = currentProbe.FailureThreshold - } - if newProbe.PeriodSeconds == 0 { - newProbe.PeriodSeconds = currentProbe.PeriodSeconds - } - if newProbe.SuccessThreshold == 0 { - newProbe.SuccessThreshold = currentProbe.SuccessThreshold - } - if newProbe.TimeoutSeconds == 0 { - newProbe.TimeoutSeconds = currentProbe.TimeoutSeconds - } - if newProbe.InitialDelaySeconds == 0 { - newProbe.InitialDelaySeconds = currentProbe.InitialDelaySeconds - } - - return &newProbe -} diff --git a/pkg/operator/ceph/config/livenessprobe_test.go b/pkg/operator/ceph/config/livenessprobe_test.go deleted file mode 100644 index e2430392e..000000000 --- a/pkg/operator/ceph/config/livenessprobe_test.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config allows a ceph config file to be stored in Kubernetes and mounted as volumes into -// Ceph daemon containers. 
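// Illustrative sketch only, not part of the files removed above: it demonstrates the merge
// behaviour of GetLivenessProbeWithDefaults shown earlier — the handler always comes from
// the probe Rook generated, while numeric fields left at zero fall back to the defaults.
// The package name and the exec command are hypothetical; this assumes the older core/v1
// API used above, where Probe still embeds Handler.
package livenessexample

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/config"
	v1 "k8s.io/api/core/v1"
)

func mergeProbesExample() {
	// Probe generated by Rook for a daemon; its exec handler is daemon-specific.
	rookDefault := &v1.Probe{
		Handler: v1.Handler{
			Exec: &v1.ExecAction{Command: []string{"ceph", "status"}},
		},
		InitialDelaySeconds: 10,
		PeriodSeconds:       10,
	}

	// User override from the CephCluster healthCheck spec: only tune the timings.
	userSpec := &v1.Probe{InitialDelaySeconds: 30, TimeoutSeconds: 5}

	merged := config.GetLivenessProbeWithDefaults(userSpec, rookDefault)

	// merged keeps rookDefault's exec command, InitialDelaySeconds becomes 30,
	// TimeoutSeconds becomes 5, and PeriodSeconds falls back to 10.
	fmt.Println(merged.Exec.Command, merged.InitialDelaySeconds, merged.PeriodSeconds, merged.TimeoutSeconds)
}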
-package config - -import ( - "reflect" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func TestConfigureLivenessProbe(t *testing.T) { - keyTypes := []rook.KeyType{ - cephv1.KeyMds, - cephv1.KeyMon, - cephv1.KeyMgr, - cephv1.KeyOSD, - } - - for _, keyType := range keyTypes { - configLivenessProbeHelper(t, keyType) - } -} - -func configLivenessProbeHelper(t *testing.T, keyType rook.KeyType) { - p := &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: "/", - Port: intstr.FromInt(8080), - }, - }, - } - container := v1.Container{LivenessProbe: p} - l := map[rook.KeyType]*cephv1.ProbeSpec{keyType: {Disabled: true}} - type args struct { - daemon rook.KeyType - container v1.Container - healthCheck cephv1.CephClusterHealthCheckSpec - } - tests := []struct { - name string - args args - want v1.Container - }{ - {"probe-enabled", args{keyType, container, cephv1.CephClusterHealthCheckSpec{}}, container}, - {"probe-disabled", args{keyType, container, cephv1.CephClusterHealthCheckSpec{LivenessProbe: l}}, v1.Container{}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := ConfigureLivenessProbe(tt.args.daemon, tt.args.container, tt.args.healthCheck); !reflect.DeepEqual(got, tt.want) { - t.Errorf("ConfigureLivenessProbe() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetLivenessProbeWithDefaults(t *testing.T) { - t.Run("using default probe", func(t *testing.T) { - currentProb := &v1.Probe{ - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - // Example: - Command: []string{ - "env", - "-i", - "sh", - "-c", - "ceph --admin-daemon /run/ceph/ceph-mon.c.asok mon_status", - }, - }, - }, - InitialDelaySeconds: 10, - } - // in case of default probe - desiredProbe := &v1.Probe{} - desiredProbe = GetLivenessProbeWithDefaults(desiredProbe, currentProb) - assert.Equal(t, desiredProbe, currentProb) - }) - - t.Run("overriding default probes", func(t *testing.T) { - currentProb := &v1.Probe{ - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - // Example: - Command: []string{ - "env", - "-i", - "sh", - "-c", - "ceph --admin-daemon /run/ceph/ceph-mon.c.asok mon_status", - }, - }, - }, - InitialDelaySeconds: 10, - } - - desiredProbe := &v1.Probe{ - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - // Example: - Command: []string{ - "env", - "-i", - "sh", - "-c", - "ceph --admin-daemon /run/ceph/ceph-mon.foo.asok mon_status", - }, - }, - }, - InitialDelaySeconds: 1, - FailureThreshold: 2, - PeriodSeconds: 3, - SuccessThreshold: 4, - TimeoutSeconds: 5, - } - desiredProbe = GetLivenessProbeWithDefaults(desiredProbe, currentProb) - assert.Equal(t, desiredProbe.Exec.Command, []string{"env", "-i", "sh", "-c", "ceph --admin-daemon /run/ceph/ceph-mon.c.asok mon_status"}) - assert.Equal(t, desiredProbe.InitialDelaySeconds, int32(1)) - assert.Equal(t, desiredProbe.FailureThreshold, int32(2)) - assert.Equal(t, desiredProbe.PeriodSeconds, int32(3)) - assert.Equal(t, desiredProbe.SuccessThreshold, int32(4)) - assert.Equal(t, desiredProbe.TimeoutSeconds, int32(5)) - }) -} diff --git a/pkg/operator/ceph/config/monstore.go b/pkg/operator/ceph/config/monstore.go deleted file mode 100644 index fed4ae2cf..000000000 --- a/pkg/operator/ceph/config/monstore.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "encoding/json" - "strings" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" -) - -// MonStore provides methods for setting Ceph configurations in the centralized mon -// configuration database. -type MonStore struct { - context *clusterd.Context - clusterInfo *client.ClusterInfo -} - -// GetMonStore returns a new MonStore for the cluster. -func GetMonStore(context *clusterd.Context, clusterInfo *client.ClusterInfo) *MonStore { - return &MonStore{ - context: context, - clusterInfo: clusterInfo, - } -} - -// Option defines the pieces of information relevant to Ceph configuration options. -type Option struct { - // Who is the entity(-ies) the option should apply to. - Who string - - // Option is the option key - Option string - - // Value is the value for the option - Value string -} - -func (m *MonStore) SetIfChanged(who, option, value string) (bool, error) { - currentVal, err := m.Get(who, option) - if err != nil { - return false, errors.Wrapf(err, "failed to get value %q", option) - } - if currentVal == value { - // no need to update the setting - return false, nil - } - - if err := m.Set(who, option, value); err != nil { - return false, errors.Wrapf(err, "failed to set value %s=%s", option, value) - } - return true, nil -} - -// Set sets a config in the centralized mon configuration database. -// https://docs.ceph.com/docs/master/rados/configuration/ceph-conf/#monitor-configuration-database -func (m *MonStore) Set(who, option, value string) error { - logger.Infof("setting %q=%q=%q option to the mon configuration database", who, option, value) - args := []string{"config", "set", who, normalizeKey(option), value} - cephCmd := client.NewCephCommand(m.context, m.clusterInfo, args) - out, err := cephCmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to set ceph config in the centralized mon configuration database; "+ - "you may need to use the rook-config-override ConfigMap. output: %s", string(out)) - } - - logger.Infof("successfully set %q=%q=%q option to the mon configuration database", who, option, value) - return nil -} - -// Delete a config in the centralized mon configuration database. -func (m *MonStore) Delete(who, option string) error { - logger.Infof("deleting %q option from the mon configuration database", option) - args := []string{"config", "rm", who, normalizeKey(option)} - cephCmd := client.NewCephCommand(m.context, m.clusterInfo, args) - out, err := cephCmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to delete ceph config in the centralized mon configuration database. output: %s", - string(out)) - } - - logger.Infof("successfully deleted %q option from the mon configuration database", option) - return nil -} - -// Get retrieves a config in the centralized mon configuration database. 
-// https://docs.ceph.com/docs/master/rados/configuration/ceph-conf/#monitor-configuration-database -func (m *MonStore) Get(who, option string) (string, error) { - args := []string{"config", "get", who, normalizeKey(option)} - cephCmd := client.NewCephCommand(m.context, m.clusterInfo, args) - out, err := cephCmd.Run() - if err != nil { - return "", errors.Wrapf(err, "failed to get config setting %q for user %q", option, who) - } - return strings.TrimSpace(string(out)), nil -} - -// GetDaemon retrieves all configs for a specific daemon in the centralized mon configuration database. -func (m *MonStore) GetDaemon(who string) ([]Option, error) { - args := []string{"config", "get", who} - cephCmd := client.NewCephCommand(m.context, m.clusterInfo, args) - out, err := cephCmd.Run() - if err != nil { - return []Option{}, errors.Wrapf(err, "failed to get config for daemon %q. output: %s", who, string(out)) - } - var result map[string]interface{} - err = json.Unmarshal(out, &result) - if err != nil { - return []Option{}, errors.Wrapf(err, "failed to parse json config for daemon %q. json: %s", who, string(out)) - } - daemonOptions := []Option{} - for k := range result { - v := result[k].(map[string]interface{}) - optionWho := v["section"].(string) - // Only get specialized options (don't take global one) - if optionWho == who { - daemonOptions = append(daemonOptions, Option{optionWho, k, v["value"].(string)}) - } - } - return daemonOptions, nil -} - -// DeleteDaemon delete all configs for a specific daemon in the centralized mon configuration database. -func (m *MonStore) DeleteDaemon(who string) error { - configOptions, err := m.GetDaemon(who) - if err != nil { - return errors.Wrapf(err, "failed to get daemon config for %q", who) - } - - for _, option := range configOptions { - err := m.Delete(who, option.Option) - if err != nil { - return errors.Wrapf(err, "failed to delete option %q on %q", option.Option, who) - } - } - return nil -} - -// SetAll sets all configs from the overrides in the centralized mon configuration database. -// See MonStore.Set for more. -func (m *MonStore) SetAll(options ...Option) error { - var errs []error - for _, override := range options { - err := m.Set(override.Who, override.Option, override.Value) - if err != nil { - errs = append(errs, err) - } - } - if len(errs) > 0 { - retErr := errors.New("failed to set one or more Ceph configs") - for _, err := range errs { - retErr = errors.Wrapf(err, "%v", retErr) - } - return retErr - } - return nil -} - -// DeleteAll deletes all provided configs from the overrides in the centralized mon configuration database. -// See MonStore.Delete for more. -func (m *MonStore) DeleteAll(options ...Option) error { - var errs []error - for _, override := range options { - err := m.Delete(override.Who, override.Option) - if err != nil { - errs = append(errs, err) - } - } - if len(errs) > 0 { - retErr := errors.New("failed to delete one or more Ceph configs") - for _, err := range errs { - retErr = errors.Wrapf(err, "%v", retErr) - } - return retErr - } - return nil -} diff --git a/pkg/operator/ceph/config/monstore_test.go b/pkg/operator/ceph/config/monstore_test.go deleted file mode 100644 index 1d0978a26..000000000 --- a/pkg/operator/ceph/config/monstore_test.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
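// Illustrative sketch only, not part of the files removed above: it shows how the MonStore
// defined in monstore.go is typically used to push settings into the centralized mon config
// database. The option names and values and the package name are hypothetical examples;
// import paths are the pre-removal paths from the diff headers.
package monstoreexample

import (
	"github.com/rook/rook/pkg/clusterd"
	"github.com/rook/rook/pkg/daemon/ceph/client"
	"github.com/rook/rook/pkg/operator/ceph/config"
)

func applyExampleSettings(ctx *clusterd.Context, clusterInfo *client.ClusterInfo) error {
	monStore := config.GetMonStore(ctx, clusterInfo)

	// Only issue "ceph config set" when the stored value actually differs; keys with
	// spaces or dashes are normalized to underscores before being applied.
	if _, err := monStore.SetIfChanged("global", "log to file", "false"); err != nil {
		return err
	}

	// Batch several overrides; SetAll attempts every option and reports failures together.
	return monStore.SetAll(
		config.Option{Who: "global", Option: "mon allow pool delete", Value: "true"},
		config.Option{Who: "osd.0", Option: "debug osd", Value: "10"},
	)
}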
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "reflect" - "strings" - "testing" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - testop "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestMonStore_Set(t *testing.T) { - executor := &exectest.MockExecutor{} - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - } - - // create a mock command runner which creates a simple string of the command it ran, and allow - // us to cause it to return an error when it detects a keyword. - execedCmd := "" - execInjectErr := false - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { - execedCmd = command + " " + strings.Join(args, " ") - if execInjectErr { - return "output from cmd with error", errors.New("mocked error") - } - return "", nil - } - - monStore := GetMonStore(ctx, &client.ClusterInfo{Namespace: "ns"}) - - // setting with spaces converts to underscores - e := monStore.Set("global", "debug ms", "10") - assert.NoError(t, e) - assert.Contains(t, execedCmd, "config set global debug_ms 10") - - // setting with dashes converts to underscores - e = monStore.Set("osd.0", "debug-osd", "20") - assert.NoError(t, e) - assert.Contains(t, execedCmd, " config set osd.0 debug_osd 20 ") - - // setting with underscores stays the same - e = monStore.Set("mds.*", "debug_mds", "15") - assert.NoError(t, e) - assert.Contains(t, execedCmd, " config set mds.* debug_mds 15 ") - - // errors returned as expected - execInjectErr = true - e = monStore.Set("mon.*", "unknown_setting", "10") - assert.Error(t, e) - assert.Contains(t, execedCmd, " config set mon.* unknown_setting 10 ") -} - -func TestMonStore_Delete(t *testing.T) { - executor := &exectest.MockExecutor{} - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - } - - // create a mock command runner which creates a simple string of the command it ran, and allow - // us to cause it to return an error when it detects a keyword. 
- execedCmd := "" - execInjectErr := false - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { - execedCmd = command + " " + strings.Join(args, " ") - if execInjectErr { - return "output from cmd with error", errors.New("mocked error") - } - return "", nil - } - - monStore := GetMonStore(ctx, &client.ClusterInfo{Namespace: "ns"}) - - // ceph config rm called as expected - e := monStore.Delete("global", "debug ms") - assert.NoError(t, e) - assert.Contains(t, execedCmd, "config rm global debug_ms") - - // errors returned as expected - execInjectErr = true - e = monStore.Delete("mon.*", "unknown_setting") - assert.Error(t, e) - assert.Contains(t, execedCmd, " config rm mon.* unknown_setting ") -} - -func TestMonStore_GetDaemon(t *testing.T) { - executor := &exectest.MockExecutor{} - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - } - - // create a mock command runner which creates a simple string of the command it ran, and allow - // us to cause it to return an error when it detects a keyword and to return a specific string - execedCmd := "" - execReturn := "{\"rbd_default_features\":{\"value\":\"3\",\"section\":\"global\",\"mask\":{}," + - "\"can_update_at_runtime\":true}," + - "\"rgw_enable_usage_log\":{\"value\":\"true\",\"section\":\"client.rgw.test.a\",\"mask\":{}," + - "\"can_update_at_runtime\":true}}" - execInjectErr := false - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { - execedCmd = command + " " + strings.Join(args, " ") - if execInjectErr { - return "output from cmd with error", errors.New("mocked error") - } - return execReturn, nil - } - - monStore := GetMonStore(ctx, &client.ClusterInfo{Namespace: "ns"}) - - // ceph config get called as expected - options, e := monStore.GetDaemon("client.rgw.test.a") - assert.NoError(t, e) - assert.Contains(t, execedCmd, "ceph config get client.rgw.test.a") - assert.True(t, reflect.DeepEqual(options, []Option{{"client.rgw.test.a", "rgw_enable_usage_log", "true"}})) - - // json parse exception return as expected - execReturn = "bad json output" - _, e = monStore.GetDaemon("client.rgw.test.a") - assert.Error(t, e) - assert.Contains(t, e.Error(), "failed to parse json config for daemon \"client.rgw.test.a\". 
json: "+ - "bad json output") - - // errors returned as expected - execInjectErr = true - _, e = monStore.GetDaemon("mon.*") - assert.Error(t, e) - assert.Contains(t, execedCmd, " config get mon.* ") -} - -func TestMonStore_DeleteDaemon(t *testing.T) { - executor := &exectest.MockExecutor{} - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - } - - // create a mock command runner which creates a simple string of the command it ran, and allow - // us to cause it to return an error when it detects a keyword and to return a specific string - execedCmd := "" - execReturn := "{\"rbd_default_features\":{\"value\":\"3\",\"section\":\"global\",\"mask\":{}," + - "\"can_update_at_runtime\":true}," + - "\"rgw_enable_usage_log\":{\"value\":\"true\",\"section\":\"client.rgw.test.a\",\"mask\":{}," + - "\"can_update_at_runtime\":true}}" - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { - execedCmd = command + " " + strings.Join(args, " ") - return execReturn, nil - } - - monStore := GetMonStore(ctx, &client.ClusterInfo{Namespace: "ns"}) - - // ceph config rm rgw_enable_usage_log called as expected - e := monStore.DeleteDaemon("client.rgw.test.a") - assert.NoError(t, e) - assert.Contains(t, execedCmd, "ceph config rm client.rgw.test.a rgw_enable_usage_log") -} - -func TestMonStore_SetAll(t *testing.T) { - clientset := testop.New(t, 1) - executor := &exectest.MockExecutor{} - ctx := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - } - - // create a mock command runner which creates a simple string of the command it ran, and allow - // us to cause it to return an error when it detects a keyword. - execedCmds := []string{} - execInjectErrOnKeyword := "donotinjectanerror" - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { - execedCmd := command + " " + strings.Join(args, " ") - execedCmds = append(execedCmds, execedCmd) - k := execInjectErrOnKeyword - if strings.Contains(execedCmd, k) { - return "output from cmd with error on keyword: " + k, errors.Errorf("mocked error on keyword: " + k) - } - return "", nil - } - - monStore := GetMonStore(ctx, &client.ClusterInfo{Namespace: "ns"}) - - cfgOverrides := []Option{ - configOverride("global", "debug ms", "10"), // setting w/ spaces converts to underscores - configOverride("osd.0", "debug-osd", "20"), // setting w/ dashes converts to underscores - configOverride("mds.*", "debug_mds", "15"), // setting w/ underscores remains the same - } - - // commands w/ no error - e := monStore.SetAll(cfgOverrides...) - assert.NoError(t, e) - assert.Len(t, execedCmds, 3) - assert.Contains(t, execedCmds[0], " global debug_ms 10 ") - assert.Contains(t, execedCmds[1], " osd.0 debug_osd 20 ") - assert.Contains(t, execedCmds[2], " mds.* debug_mds 15 ") - - // commands w/ one error - // keep cfgOverrides from last test - execInjectErrOnKeyword = "debug_osd" - execedCmds = execedCmds[:0] // empty execedCmds slice - e = monStore.SetAll(cfgOverrides...) - assert.Error(t, e) - // Rook should not return error before trying to set all config overrides - assert.Len(t, execedCmds, 3) - - // all commands return error - // keep cfgOverrides - execInjectErrOnKeyword = "debug" - execedCmds = execedCmds[:0] - e = monStore.SetAll(cfgOverrides...) 
- assert.Error(t, e) - assert.Len(t, execedCmds, 3) -} diff --git a/pkg/operator/ceph/config/network.go b/pkg/operator/ceph/config/network.go deleted file mode 100644 index 68478f448..000000000 --- a/pkg/operator/ceph/config/network.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config provides default configurations which Rook will set in Ceph clusters. -package config - -import ( - "context" - "fmt" - "strings" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // PublicNetworkSelectorKeyName is the network selector key for the ceph public network - PublicNetworkSelectorKeyName = "public" - // ClusterNetworkSelectorKeyName is the network selector key for the ceph cluster network - ClusterNetworkSelectorKeyName = "cluster" - // WhereaboutsIPAMType is Whereabouts IPAM type - WhereaboutsIPAMType = "whereabouts" - hostLocalIPAMType = "host-local" - staticIPAMType = "static" -) - -var ( - // NetworkSelectors is a slice of ceph network selector key name - NetworkSelectors = []string{PublicNetworkSelectorKeyName, ClusterNetworkSelectorKeyName} -) - -func generateNetworkSettings(clusterdContext *clusterd.Context, namespace string, networkSelectors map[string]string) ([]Option, error) { - ctx := context.TODO() - cephNetworks := []Option{} - - for _, selectorKey := range NetworkSelectors { - // skip if selector is not specified - if _, ok := networkSelectors[selectorKey]; !ok { - continue - } - - multusNamespace, nad := GetMultusNamespace(networkSelectors[selectorKey]) - if multusNamespace == "" { - multusNamespace = namespace - } - // Get network attachment definition - netDefinition, err := clusterdContext.NetworkClient.NetworkAttachmentDefinitions(multusNamespace).Get(ctx, nad, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - return []Option{}, errors.Wrapf(err, "specified network attachment definition %q in namespace %q for selector %q does not exist", nad, namespace, selectorKey) - } - return []Option{}, errors.Wrapf(err, "failed to fetch network attachment definition for selector %q", selectorKey) - } - - // Get network attachment definition configuration - netConfig, err := k8sutil.GetNetworkAttachmentConfig(*netDefinition) - if err != nil { - return []Option{}, errors.Wrapf(err, "failed to get network attachment definition configuration for selector %q", selectorKey) - } - - networkRange := getNetworkRange(netConfig) - if networkRange != "" { - cephNetworks = append(cephNetworks, configOverride("global", fmt.Sprintf("%s_network", selectorKey), networkRange)) - } else { - return []Option{}, errors.Errorf("empty subnet from network attachment definition %q", networkSelectors[selectorKey]) - } - } - - return cephNetworks, nil -} - -func GetMultusNamespace(nad string) (string, string) { - tmp := strings.Split(nad, 
"/") - if len(tmp) == 2 { - return tmp[0], tmp[1] - } - return "", nad -} - -func getNetworkRange(netConfig k8sutil.NetworkAttachmentConfig) string { - var subnets []string - - switch netConfig.Ipam.Type { - case hostLocalIPAMType: - if netConfig.Ipam.Subnet != "" { - return netConfig.Ipam.Subnet - } - for _, netRanges := range netConfig.Ipam.Ranges { - for _, netRange := range netRanges { - subnets = append(subnets, netRange.Subnet) - } - } - return strings.Join(subnets, ",") - - case staticIPAMType: - for _, subnet := range netConfig.Ipam.Addresses { - subnets = append(subnets, subnet.Address) - } - - return strings.Join(subnets, ",") - - case WhereaboutsIPAMType: - return netConfig.Ipam.Range - - default: - return "" - } -} diff --git a/pkg/operator/ceph/config/network_test.go b/pkg/operator/ceph/config/network_test.go deleted file mode 100644 index 00df7f677..000000000 --- a/pkg/operator/ceph/config/network_test.go +++ /dev/null @@ -1,351 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "context" - "fmt" - "testing" - - networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - fakenetclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestGenerateNetworkSettings(t *testing.T) { - t.Run("no network definition exists", func(*testing.T) { - ns := "rook-ceph" - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - NetworkClient: fakenetclient.NewSimpleClientset().K8sCniCncfIoV1(), - } - netSelector := map[string]string{"public": "public-network-attach-def"} - _, err := generateNetworkSettings(ctx, ns, netSelector) - assert.Error(t, err) - - }) - - t.Run("only cluster network", func(*testing.T) { - netSelector := map[string]string{"cluster": "cluster-network-attach-def"} - networks := []networkv1.NetworkAttachmentDefinition{getClusterNetwork()} - expectedNetworks := []Option{ - { - Who: "global", - Option: "cluster_network", - Value: "172.18.0.0/16", - }, - } - ctxt := context.TODO() - ns := "rook-ceph" - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - NetworkClient: fakenetclient.NewSimpleClientset().K8sCniCncfIoV1(), - } - - for i := range networks { - _, err := ctx.NetworkClient.NetworkAttachmentDefinitions(ns).Create(ctxt, &networks[i], metav1.CreateOptions{}) - assert.NoError(t, err) - } - cephNetwork, err := generateNetworkSettings(ctx, ns, netSelector) - assert.NoError(t, err) - assert.ElementsMatch(t, cephNetwork, expectedNetworks, fmt.Sprintf("networks: %+v", cephNetwork)) - }) - - t.Run("only public network", func(*testing.T) { - ctxt := context.TODO() - ns := "rook-ceph" - clientset := 
testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - NetworkClient: fakenetclient.NewSimpleClientset().K8sCniCncfIoV1(), - } - netSelector := map[string]string{"public": "public-network-attach-def"} - networks := []networkv1.NetworkAttachmentDefinition{getPublicNetwork()} - expectedNetworks := []Option{ - { - Who: "global", - Option: "public_network", - Value: "192.168.0.0/24", - }, - } - for i := range networks { - _, err := ctx.NetworkClient.NetworkAttachmentDefinitions(ns).Create(ctxt, &networks[i], metav1.CreateOptions{}) - assert.NoError(t, err) - } - cephNetwork, err := generateNetworkSettings(ctx, ns, netSelector) - assert.NoError(t, err) - assert.ElementsMatch(t, cephNetwork, expectedNetworks, fmt.Sprintf("networks: %+v", cephNetwork)) - - }) - - t.Run("public and cluster network", func(*testing.T) { - ctxt := context.TODO() - ns := "rook-ceph" - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - NetworkClient: fakenetclient.NewSimpleClientset().K8sCniCncfIoV1(), - } - netSelector := map[string]string{ - "public": "public-network-attach-def", - "cluster": "cluster-network-attach-def", - } - networks := []networkv1.NetworkAttachmentDefinition{getPublicNetwork(), getClusterNetwork()} - expectedNetworks := []Option{ - { - Who: "global", - Option: "public_network", - Value: "192.168.0.0/24", - }, - { - Who: "global", - Option: "cluster_network", - Value: "172.18.0.0/16", - }, - } - for i := range networks { - _, err := ctx.NetworkClient.NetworkAttachmentDefinitions(ns).Create(ctxt, &networks[i], metav1.CreateOptions{}) - assert.NoError(t, err) - } - cephNetwork, err := generateNetworkSettings(ctx, ns, netSelector) - assert.NoError(t, err) - assert.ElementsMatch(t, cephNetwork, expectedNetworks, fmt.Sprintf("networks: %+v", cephNetwork)) - - }) -} - -func getPublicNetwork() networkv1.NetworkAttachmentDefinition { - return networkv1.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: "public-network-attach-def", - }, - Spec: networkv1.NetworkAttachmentDefinitionSpec{ - Config: `{ - "cniVersion": "0.3.0", - "type": "macvlan", - "master": "eth2", - "mode": "bridge", - "ipam": { - "type": "host-local", - "subnet": "192.168.0.0/24", - "gateway": "172.18.8.1" - } - }`, - }, - } -} - -func getClusterNetwork() networkv1.NetworkAttachmentDefinition { - return networkv1.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-network-attach-def", - }, - Spec: networkv1.NetworkAttachmentDefinitionSpec{ - Config: `{ - "cniVersion": "0.3.0", - "type": "macvlan", - "master": "eth2", - "mode": "bridge", - "ipam": { - "type": "host-local", - "subnet": "172.18.0.0/16", - "gateway": "172.18.0.1" - } - }`, - }, - } -} - -func TestGetNetworkRange(t *testing.T) { - t.Run("simple host-local IPAM test", func(t *testing.T) { - ns := "rook-ceph" - nad := &networkv1.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: "public-network-attach-def", - Namespace: ns, - }, - Spec: networkv1.NetworkAttachmentDefinitionSpec{ - Config: `{ - "cniVersion": "0.3.0", - "type": "macvlan", - "master": "eth2", - "mode": "bridge", - "ipam": { - "type": "host-local", - "subnet": "", - "gateway": "172.18.8.1" - } - }`, - }, - } - - netConfig, err := k8sutil.GetNetworkAttachmentConfig(*nad) - assert.NoError(t, err) - - // - // TEST 1: subnet/range is empty - // - networkRange := getNetworkRange(netConfig) - assert.Empty(t, networkRange) - - // - // TEST 2: subnet is not empty - // - netConfig.Ipam.Type = "host-local" - 
netConfig.Ipam.Subnet = "192.168.0.0/24" - networkRange = getNetworkRange(netConfig) - assert.Equal(t, "192.168.0.0/24", networkRange) - }) - - t.Run("advanced host-local IPAM test", func(t *testing.T) { - ns := "rook-ceph" - nad := &networkv1.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: "public-network-attach-def", - Namespace: ns, - }, - Spec: networkv1.NetworkAttachmentDefinitionSpec{ - Config: `{ - "ipam": { - "type": "host-local", - "ranges": [ - [ - { - "subnet": "10.10.0.0/16", - "rangeStart": "10.10.1.20", - "rangeEnd": "10.10.3.50", - "gateway": "10.10.0.254" - }, - { - "subnet": "172.16.5.0/24" - } - ], - [ - { - "subnet": "3ffe:ffff:0:01ff::/64", - "rangeStart": "3ffe:ffff:0:01ff::0010", - "rangeEnd": "3ffe:ffff:0:01ff::0020" - } - ] - ], - "routes": [ - { "dst": "0.0.0.0/0" }, - { "dst": "192.168.0.0/16", "gw": "10.10.5.1" }, - { "dst": "3ffe:ffff:0:01ff::1/64" } - ], - "dataDir": "/run/my-orchestrator/container-ipam-state" - } -}`, - }, - } - - netConfig, err := k8sutil.GetNetworkAttachmentConfig(*nad) - assert.NoError(t, err) - networkRange := getNetworkRange(netConfig) - assert.Equal(t, "10.10.0.0/16,172.16.5.0/24,3ffe:ffff:0:01ff::/64", networkRange) - }) - - t.Run("advanced static IPAM test", func(t *testing.T) { - ns := "rook-ceph" - nad := &networkv1.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: "public-network-attach-def", - Namespace: ns, - }, - Spec: networkv1.NetworkAttachmentDefinitionSpec{ - Config: `{ - "ipam": { - "type": "static", - "addresses": [ - { - "address": "10.10.0.1/24", - "gateway": "10.10.0.254" - }, - { - "address": "3ffe:ffff:0:01ff::1/64", - "gateway": "3ffe:ffff:0::1" - } - ], - "routes": [ - { "dst": "0.0.0.0/0" }, - { "dst": "192.168.0.0/16", "gw": "10.10.5.1" }, - { "dst": "3ffe:ffff:0:01ff::1/64" } - ], - "dns": { - "nameservers" : ["8.8.8.8"], - "domain": "example.com", - "search": [ "example.com" ] - } - } -}`, - }, - } - netConfig, err := k8sutil.GetNetworkAttachmentConfig(*nad) - assert.NoError(t, err) - networkRange := getNetworkRange(netConfig) - assert.Equal(t, "10.10.0.1/24,3ffe:ffff:0:01ff::1/64", networkRange) - }) - - t.Run("advanced whereabouts IPAM test", func(t *testing.T) { - ns := "rook-ceph" - nad := &networkv1.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: "public-network-attach-def", - Namespace: ns, - }, - Spec: networkv1.NetworkAttachmentDefinitionSpec{ - Config: `{ - "cniVersion": "0.3.0", - "name": "whereaboutsexample", - "type": "macvlan", - "master": "eth0", - "mode": "bridge", - "ipam": { - "type": "whereabouts", - "range": "192.168.2.225/28", - "exclude": [ - "192.168.2.229/30", - "192.168.2.236/32" - ] - } -}`, - }, - } - netConfig, err := k8sutil.GetNetworkAttachmentConfig(*nad) - assert.NoError(t, err) - networkRange := getNetworkRange(netConfig) - assert.Equal(t, "192.168.2.225/28", networkRange) - }) -} - -func TestGetMultusNamespace(t *testing.T) { - // TEST 1: When namespace is specified with the NAD - namespace, nad := GetMultusNamespace("multus-ns/public-nad") - assert.Equal(t, "multus-ns", namespace) - assert.Equal(t, "public-nad", nad) - - // TEST 2: When only NAD is specified - namespace, nad = GetMultusNamespace("public-nad") - assert.Empty(t, namespace) - assert.Equal(t, "public-nad", nad) -} diff --git a/pkg/operator/ceph/config/store.go b/pkg/operator/ceph/config/store.go deleted file mode 100644 index 5ccfa142c..000000000 --- a/pkg/operator/ceph/config/store.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2019 The Rook Authors. 
All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config allows a ceph config file to be stored in Kubernetes and mounted as volumes into -// Ceph daemon containers. -package config - -import ( - "context" - "strings" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // StoreName is the name of the configmap containing ceph configuration options - StoreName = "rook-ceph-config" - monHostKey = "mon_host" - monInitialMembersKey = "mon_initial_members" - // Msgr2port is the listening port of the messenger v2 protocol - Msgr2port = 3300 -) - -// Store manages storage of the Ceph config file shared by all daemons (if applicable) as well as an -// updated 'mon_host' which can be mapped to daemon containers and referenced in daemon command line -// arguments. -type Store struct { - configMapStore *k8sutil.ConfigMapKVStore - namespace string - context *clusterd.Context - ownerInfo *k8sutil.OwnerInfo -} - -// GetStore returns the Store for the cluster. -func GetStore(context *clusterd.Context, namespace string, ownerInfo *k8sutil.OwnerInfo) *Store { - return &Store{ - configMapStore: k8sutil.NewConfigMapKVStore(namespace, context.Clientset, ownerInfo), - namespace: namespace, - context: context, - ownerInfo: ownerInfo, - } -} - -// CreateOrUpdate creates or updates the stored Ceph config based on the cluster info. 
-func (s *Store) CreateOrUpdate(clusterInfo *cephclient.ClusterInfo) error { - ctx := context.TODO() - // these are used for all ceph daemons on the commandline and must *always* be stored - if err := s.createOrUpdateMonHostSecrets(ctx, clusterInfo); err != nil { - return errors.Wrap(err, "failed to store mon host configs") - } - - return nil -} - -// update "mon_host" and "mon_initial_members" in the stored config -func (s *Store) createOrUpdateMonHostSecrets(ctx context.Context, clusterInfo *cephclient.ClusterInfo) error { - - // extract a list of just the monitor names, which will populate the "mon initial members" - // and "mon hosts" global config field - members, hosts := cephclient.PopulateMonHostMembers(clusterInfo.Monitors) - - // store these in a secret instead of the configmap; secrets are required by CSI drivers - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - // the config's secret store has the same name as the configmap store for consistency - Name: StoreName, - Namespace: s.namespace, - }, - StringData: map[string]string{ - monHostKey: strings.Join(hosts, ","), - monInitialMembersKey: strings.Join(members, ","), - }, - Type: k8sutil.RookType, - } - clientset := s.context.Clientset - err := s.ownerInfo.SetControllerReference(secret) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to moh host secret %q", secret.Name) - } - - _, err = clientset.CoreV1().Secrets(s.namespace).Get(ctx, StoreName, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debugf("creating config secret %q", secret.Name) - if _, err := clientset.CoreV1().Secrets(s.namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil { - return errors.Wrapf(err, "failed to create config secret %+v", secret) - } - } else { - return errors.Wrapf(err, "failed to get config secret %s", StoreName) - } - } - - logger.Debugf("updating config secret %q", secret.Name) - if _, err := clientset.CoreV1().Secrets(s.namespace).Update(ctx, secret, metav1.UpdateOptions{}); err != nil { - return errors.Wrapf(err, "failed to update config secret %+v", secret) - } - - return nil -} - -// StoredMonHostEnvVars returns a container environment variable defined by the most updated stored -// "mon_host" and "mon_initial_members" information. -func StoredMonHostEnvVars() []v1.EnvVar { - return []v1.EnvVar{ - {Name: "ROOK_CEPH_MON_HOST", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{ - Name: StoreName}, - Key: monHostKey}}}, - {Name: "ROOK_CEPH_MON_INITIAL_MEMBERS", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{LocalObjectReference: v1.LocalObjectReference{ - Name: StoreName}, - Key: monInitialMembersKey}}}, - } -} - -// StoredMonHostEnvVarFlags returns Ceph commandline flag references to "mon_host" and -// "mon_initial_members" sourced from the StoredMonHostEnvVars. -func StoredMonHostEnvVarFlags() []string { - return []string{ - NewFlag(monHostKey, "$(ROOK_CEPH_MON_HOST)"), - NewFlag(monInitialMembersKey, "$(ROOK_CEPH_MON_INITIAL_MEMBERS)"), - } -} diff --git a/pkg/operator/ceph/config/store_test.go b/pkg/operator/ceph/config/store_test.go deleted file mode 100644 index 735f55999..000000000 --- a/pkg/operator/ceph/config/store_test.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
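As a minimal sketch of how the store.go helpers removed above are consumed by daemon pods (assuming this config package; the container name and image below are hypothetical placeholders, not values from the deleted code):

package config

import v1 "k8s.io/api/core/v1"

// exampleMonHostContainer shows the intended wiring only; it is not part of the
// deleted file. Kubernetes expands $(ROOK_CEPH_MON_HOST) and
// $(ROOK_CEPH_MON_INITIAL_MEMBERS) in Args from the Env entries below, which are
// sourced from the "rook-ceph-config" secret keys "mon_host" and
// "mon_initial_members" written by Store.CreateOrUpdate.
func exampleMonHostContainer() v1.Container {
	return v1.Container{
		Name:  "example-ceph-daemon",         // hypothetical name
		Image: "example.invalid/ceph:latest", // hypothetical image
		Args:  StoredMonHostEnvVarFlags(),
		Env:   StoredMonHostEnvVars(),
	}
}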
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - testop "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestStore(t *testing.T) { - ctxt := context.TODO() - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - } - ns := "rook-ceph" - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - - s := GetStore(ctx, ns, ownerInfo) - - assertConfigStore := func(ci *cephclient.ClusterInfo) { - sec, e := clientset.CoreV1().Secrets(ns).Get(ctxt, StoreName, metav1.GetOptions{}) - assert.NoError(t, e) - mh := strings.Split(sec.StringData["mon_host"], ",") // list of mon ip:port pairs in cluster - assert.Equal(t, len(ci.Monitors)*2, len(mh), ci.Monitors["a"].Endpoint) // we need to pass x2 since we split on "," above and that returns msgr1 and msgr2 addresses - mim := strings.Split(sec.StringData["mon_initial_members"], ",") // list of mon ids in cluster - assert.Equal(t, len(ci.Monitors), len(mim)) - // make sure every mon has its id/ip:port in mon_initial_members/mon_host - for _, id := range mim { - // cannot use "assert.Contains(t, mh, ci.Monitors[id].Endpoint)" - // it looks like the value is not found but if present, it might be confused by the brackets - contains := false - for _, c := range mh { - if strings.Contains(c, ci.Monitors[id].Endpoint) { - contains = true - } - } - assert.True(t, contains) - assert.Contains(t, mim, ci.Monitors[id].Name) - } - } - - i1 := clienttest.CreateTestClusterInfo(1) // cluster w/ one mon - i3 := clienttest.CreateTestClusterInfo(3) // same cluster w/ 3 mons - - err := s.CreateOrUpdate(i1) - assert.NoError(t, err) - assertConfigStore(i1) - - err = s.CreateOrUpdate(i3) - assert.NoError(t, err) - assertConfigStore(i3) -} - -func TestEnvVarsAndFlags(t *testing.T) { - clientset := testop.New(t, 1) - ctx := &clusterd.Context{ - Clientset: clientset, - } - ns := "rook-ceph" - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - - s := GetStore(ctx, ns, ownerInfo) - err := s.CreateOrUpdate(clienttest.CreateTestClusterInfo(3)) - assert.NoError(t, err) - - v := StoredMonHostEnvVars() - f := StoredMonHostEnvVarFlags() - - // make sure the env var names and flags are matching pairs - mh := v[0].Name - mim := v[1].Name - assert.Contains(t, f, fmt.Sprintf("--mon-host=$(%s)", mh)) - assert.Contains(t, f, fmt.Sprintf("--mon-initial-members=$(%s)", mim)) - - // make sure the env vars are sourced from the right place - assert.Equal(t, StoreName, v[0].ValueFrom.SecretKeyRef.LocalObjectReference.Name) - assert.Equal(t, "mon_host", v[0].ValueFrom.SecretKeyRef.Key) - assert.Equal(t, StoreName, v[1].ValueFrom.SecretKeyRef.LocalObjectReference.Name) - assert.Equal(t, "mon_initial_members", v[1].ValueFrom.SecretKeyRef.Key) -} diff --git a/pkg/operator/ceph/controller/conditions.go b/pkg/operator/ceph/controller/conditions.go deleted file mode 100644 index 
2cb01f201..000000000 --- a/pkg/operator/ceph/controller/conditions.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config to provide conditions for CephCluster -package controller - -import ( - "context" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/reporting" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// UpdateCondition function will export each condition into the cluster custom resource -func UpdateCondition(c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status v1.ConditionStatus, reason cephv1.ConditionReason, message string) { - // use client.Client unit test this more easily with updating statuses which must use the client - cluster := &cephv1.CephCluster{} - if err := c.Client.Get(context.TODO(), namespaceName, cluster); err != nil { - logger.Errorf("failed to get cluster %v to update the conditions. %v", namespaceName, err) - return - } - - UpdateClusterCondition(c, cluster, namespaceName, conditionType, status, reason, message, false) -} - -// UpdateClusterCondition function will export each condition into the cluster custom resource -func UpdateClusterCondition(c *clusterd.Context, cluster *cephv1.CephCluster, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status v1.ConditionStatus, - reason cephv1.ConditionReason, message string, preserveAllConditions bool) { - - // Keep the conditions that already existed if they are in the list of long-term conditions, - // otherwise discard the temporary conditions - var currentCondition *cephv1.Condition - var conditions []cephv1.Condition - for _, condition := range cluster.Status.Conditions { - // Only keep conditions in the list if it's a persisted condition such as the cluster creation being completed. - // The transient conditions are not persisted. However, if the currently requested condition is not expected to - // reset the transient conditions, they are retained. For example, if the operator is checking for ceph health - // in the middle of the reconcile, the progress condition should not be reset by the status check update. 
- if preserveAllConditions || - condition.Reason == cephv1.ClusterCreatedReason || - condition.Reason == cephv1.ClusterConnectedReason || - condition.Type == cephv1.ConditionDeleting || - condition.Type == cephv1.ConditionDeletionIsBlocked { - if conditionType != condition.Type { - conditions = append(conditions, condition) - continue - } - // Update the existing condition with the new status - currentCondition = condition.DeepCopy() - if currentCondition.Status != status || currentCondition.Message != message { - // Update the last transition time since the status changed - currentCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } - currentCondition.Status = status - currentCondition.Reason = reason - currentCondition.Message = message - currentCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) - } - } - if currentCondition == nil { - // Create a new condition since not found in the existing conditions - currentCondition = &cephv1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - Message: message, - LastTransitionTime: metav1.NewTime(time.Now()), - LastHeartbeatTime: metav1.NewTime(time.Now()), - } - } - conditions = append(conditions, *currentCondition) - cluster.Status.Conditions = conditions - - // Once the cluster begins deleting, the phase should not revert back to any other phase - if cluster.Status.Phase != cephv1.ConditionDeleting { - cluster.Status.Phase = conditionType - if state := translatePhasetoState(conditionType, status); state != "" { - cluster.Status.State = state - } - cluster.Status.Message = currentCondition.Message - logger.Debugf("CephCluster %q status: %q. %q", namespaceName.Namespace, cluster.Status.Phase, cluster.Status.Message) - } - - if err := reporting.UpdateStatus(c.Client, cluster); err != nil { - logger.Errorf("failed to update cluster condition to %+v. %v", *currentCondition, err) - } -} - -// translatePhasetoState convert the Phases to corresponding State -// 1. We still need to set the State in case someone is still using it -// instead of Phase. If we stopped setting the State it would be a -// breaking change. -// 2. We can't change the enum values of the State since that is also -// a breaking change. Therefore, we translate new phases to the original -// State values -func translatePhasetoState(phase cephv1.ConditionType, status v1.ConditionStatus) cephv1.ClusterState { - if status == v1.ConditionFalse { - return cephv1.ClusterStateError - } - switch phase { - case cephv1.ConditionConnecting: - return cephv1.ClusterStateConnecting - case cephv1.ConditionConnected: - return cephv1.ClusterStateConnected - case cephv1.ConditionProgressing: - return cephv1.ClusterStateCreating - case cephv1.ConditionReady: - return cephv1.ClusterStateCreated - case cephv1.ConditionDeleting: - // "Deleting" was not a state before, so just translate the "Deleting" condition directly. - return cephv1.ClusterState(cephv1.ConditionDeleting) - default: - return "" - } -} diff --git a/pkg/operator/ceph/controller/controller_utils.go b/pkg/operator/ceph/controller/controller_utils.go deleted file mode 100644 index 0357a8715..000000000 --- a/pkg/operator/ceph/controller/controller_utils.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
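The condition helpers above are normally driven from the cluster reconcilers. A minimal sketch of that call path, assuming this controller package and a hypothetical namespace and cluster name (only identifiers visible in the deleted code are used):

package controller

import (
	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	"github.com/rook/rook/pkg/clusterd"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// exampleMarkClusterReady is a sketch, not code from the deleted files. Besides
// appending or refreshing the Ready condition, UpdateCondition also sets
// Status.Phase to ConditionReady and, via translatePhasetoState, Status.State to
// ClusterStateCreated.
func exampleMarkClusterReady(c *clusterd.Context) {
	nsName := types.NamespacedName{Namespace: "rook-ceph", Name: "my-cluster"} // hypothetical
	UpdateCondition(c, nsName, cephv1.ConditionReady, v1.ConditionTrue,
		cephv1.ClusterCreatedReason, "cluster created successfully")
}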
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - // OperatorSettingConfigMapName refers to ConfigMap that configures rook ceph operator - OperatorSettingConfigMapName string = "rook-ceph-operator-config" - - // UninitializedCephConfigError refers to the error message printed by the Ceph CLI when there is no ceph configuration file - // This typically is raised when the operator has not finished initializing - UninitializedCephConfigError = "error calling conf_read_file" - - // OperatorNotInitializedMessage is the message we print when the Operator is not ready to reconcile, typically the ceph.conf has not been generated yet - OperatorNotInitializedMessage = "skipping reconcile since operator is still initializing" - - // CancellingOrchestrationMessage is the message to indicate a reconcile was cancelled - CancellingOrchestrationMessage = "CANCELLING CURRENT ORCHESTRATION" -) - -var ( - // ImmediateRetryResult Return this for a immediate retry of the reconciliation loop with the same request object. - ImmediateRetryResult = reconcile.Result{Requeue: true} - - // ImmediateRetryResultNoBackoff Return this for a immediate retry of the reconciliation loop with the same request object. - // Override the exponential backoff behavior by setting the RequeueAfter time explicitly. - ImmediateRetryResultNoBackoff = reconcile.Result{Requeue: true, RequeueAfter: time.Second} - - // WaitForRequeueIfCephClusterNotReady waits for the CephCluster to be ready - WaitForRequeueIfCephClusterNotReady = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} - - // WaitForRequeueIfFinalizerBlocked waits for resources to be cleaned up before the finalizer can be removed - WaitForRequeueIfFinalizerBlocked = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} - - // WaitForRequeueIfOperatorNotInitialized waits for resources to be cleaned up before the finalizer can be removed - WaitForRequeueIfOperatorNotInitialized = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} - - // OperatorCephBaseImageVersion is the ceph version in the operator image - OperatorCephBaseImageVersion string -) - -func FlexDriverEnabled(context *clusterd.Context) bool { - // Ignore the error. In the remote chance that the configmap fails to be read, we will default to disabling the flex driver - value, _ := k8sutil.GetOperatorSetting(context.Clientset, OperatorSettingConfigMapName, "ROOK_ENABLE_FLEX_DRIVER", "false") - return value == "true" -} - -func DiscoveryDaemonEnabled(context *clusterd.Context) bool { - // Ignore the error. 
In the remote chance that the configmap fails to be read, we will default to disabling the discovery daemon - value, _ := k8sutil.GetOperatorSetting(context.Clientset, OperatorSettingConfigMapName, "ROOK_ENABLE_DISCOVERY_DAEMON", "false") - return value == "true" -} - -// SetCephCommandsTimeout sets the timeout value of Ceph commands which are executed from Rook -func SetCephCommandsTimeout(context *clusterd.Context) { - strTimeoutSeconds, _ := k8sutil.GetOperatorSetting(context.Clientset, OperatorSettingConfigMapName, "ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS", "15") - timeoutSeconds, err := strconv.Atoi(strTimeoutSeconds) - if err != nil || timeoutSeconds < 1 { - logger.Warningf("ROOK_CEPH_COMMANDS_TIMEOUT is %q but it should be >= 1, set the default value 15", strTimeoutSeconds) - timeoutSeconds = 15 - } - exec.CephCommandsTimeout = time.Duration(timeoutSeconds) * time.Second -} - -// CheckForCancelledOrchestration checks whether a cancellation has been requested -func CheckForCancelledOrchestration(context *clusterd.Context) error { - defer context.RequestCancelOrchestration.UnSet() - - // Check whether we need to cancel the orchestration - if context.RequestCancelOrchestration.IsSet() { - return errors.New(CancellingOrchestrationMessage) - } - - return nil -} - -// canIgnoreHealthErrStatusInReconcile determines whether a status of HEALTH_ERR in the CephCluster can be ignored safely. -func canIgnoreHealthErrStatusInReconcile(cephCluster cephv1.CephCluster, controllerName string) bool { - // Get a list of all the keys causing the HEALTH_ERR status. - var healthErrKeys = make([]string, 0) - for key, health := range cephCluster.Status.CephStatus.Details { - if health.Severity == "HEALTH_ERR" { - healthErrKeys = append(healthErrKeys, key) - } - } - - // If there is only one cause for HEALTH_ERR and it's on the allowed list of errors, ignore it. 
- var allowedErrStatus = []string{"MDS_ALL_DOWN"} - var ignoreHealthErr = len(healthErrKeys) == 1 && contains(allowedErrStatus, healthErrKeys[0]) - if ignoreHealthErr { - logger.Debugf("%q: ignoring ceph status %q because only cause is %q (full status is %+v)", controllerName, cephCluster.Status.CephStatus.Health, healthErrKeys[0], cephCluster.Status.CephStatus) - } - return ignoreHealthErr -} - -// IsReadyToReconcile determines if a controller is ready to reconcile or not -func IsReadyToReconcile(c client.Client, clustercontext *clusterd.Context, namespacedName types.NamespacedName, controllerName string) (cephv1.CephCluster, bool, bool, reconcile.Result) { - cephClusterExists := false - - // Running ceph commands won't work and the controller will keep re-queuing so I believe it's fine not to check - // Make sure a CephCluster exists before doing anything - var cephCluster cephv1.CephCluster - clusterList := &cephv1.CephClusterList{} - err := c.List(context.TODO(), clusterList, client.InNamespace(namespacedName.Namespace)) - if err != nil { - logger.Errorf("%q: failed to fetch CephCluster %v", controllerName, err) - return cephCluster, false, cephClusterExists, ImmediateRetryResult - } - if len(clusterList.Items) == 0 { - logger.Debugf("%q: no CephCluster resource found in namespace %q", controllerName, namespacedName.Namespace) - return cephCluster, false, cephClusterExists, WaitForRequeueIfCephClusterNotReady - } - cephClusterExists = true - cephCluster = clusterList.Items[0] - - logger.Debugf("%q: CephCluster resource %q found in namespace %q", controllerName, cephCluster.Name, namespacedName.Namespace) - - // read the CR status of the cluster - if cephCluster.Status.CephStatus != nil { - var operatorDeploymentOk = cephCluster.Status.CephStatus.Health == "HEALTH_OK" || cephCluster.Status.CephStatus.Health == "HEALTH_WARN" - - if operatorDeploymentOk || canIgnoreHealthErrStatusInReconcile(cephCluster, controllerName) { - logger.Debugf("%q: ceph status is %q, operator is ready to run ceph command, reconciling", controllerName, cephCluster.Status.CephStatus.Health) - return cephCluster, true, cephClusterExists, WaitForRequeueIfCephClusterNotReady - } - - details := cephCluster.Status.CephStatus.Details - message, ok := details["error"] - if ok && len(details) == 1 && strings.Contains(message.Message, "Error initializing cluster client") { - logger.Infof("%s: skipping reconcile since operator is still initializing", controllerName) - } else { - logger.Infof("%s: CephCluster %q found but skipping reconcile since ceph health is %+v", controllerName, cephCluster.Name, cephCluster.Status.CephStatus) - } - } - - logger.Debugf("%q: CephCluster %q initial reconcile is not complete yet...", controllerName, namespacedName.Namespace) - return cephCluster, false, cephClusterExists, WaitForRequeueIfCephClusterNotReady -} - -// ClusterOwnerRef represents the owner reference of the CephCluster CR -func ClusterOwnerRef(clusterName, clusterID string) metav1.OwnerReference { - blockOwner := true - controller := true - return metav1.OwnerReference{ - APIVersion: fmt.Sprintf("%s/%s", ClusterResource.Group, ClusterResource.Version), - Kind: ClusterResource.Kind, - Name: clusterName, - UID: types.UID(clusterID), - BlockOwnerDeletion: &blockOwner, - Controller: &controller, - } -} - -// ClusterResource operator-kit Custom Resource Definition -var ClusterResource = k8sutil.CustomResource{ - Name: "cephcluster", - Plural: "cephclusters", - Group: cephv1.CustomResourceGroup, - Version: cephv1.Version, - Kind: 
reflect.TypeOf(cephv1.CephCluster{}).Name(), - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} diff --git a/pkg/operator/ceph/controller/controller_utils_test.go b/pkg/operator/ceph/controller/controller_utils_test.go deleted file mode 100644 index e123494ee..000000000 --- a/pkg/operator/ceph/controller/controller_utils_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "testing" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/util/exec" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" -) - -func CreateTestClusterFromStatusDetails(details map[string]cephv1.CephHealthMessage) cephv1.CephCluster { - return cephv1.CephCluster{ - Status: cephv1.ClusterStatus{ - CephStatus: &cephv1.CephStatus{ - Details: details, - }, - }, - } -} - -func TestCanIgnoreHealthErrStatusInReconcile(t *testing.T) { - var cluster = CreateTestClusterFromStatusDetails(map[string]cephv1.CephHealthMessage{ - "MDS_ALL_DOWN": { - Severity: "HEALTH_ERR", - Message: "MDS_ALL_DOWN", - }, - "TEST_OTHER": { - Severity: "HEALTH_WARN", - Message: "TEST_OTHER", - }, - "TEST_ANOTHER": { - Severity: "HEALTH_OK", - Message: "TEST_ANOTHER", - }, - }) - assert.True(t, canIgnoreHealthErrStatusInReconcile(cluster, "controller")) - - cluster = CreateTestClusterFromStatusDetails(map[string]cephv1.CephHealthMessage{ - "MDS_ALL_DOWN": { - Severity: "HEALTH_ERR", - Message: "MDS_ALL_DOWN", - }, - "TEST_UNIGNORABLE": { - Severity: "HEALTH_ERR", - Message: "TEST_UNIGNORABLE", - }, - }) - assert.False(t, canIgnoreHealthErrStatusInReconcile(cluster, "controller")) - - cluster = CreateTestClusterFromStatusDetails(map[string]cephv1.CephHealthMessage{ - "TEST_UNIGNORABLE": { - Severity: "HEALTH_ERR", - Message: "TEST_UNIGNORABLE", - }, - }) - assert.False(t, canIgnoreHealthErrStatusInReconcile(cluster, "controller")) -} - -func TestSetCephCommandsTimeout(t *testing.T) { - clientset := fake.NewSimpleClientset() - ctx := context.TODO() - cm := &v1.ConfigMap{} - cm.Name = "rook-ceph-operator-config" - _, err := clientset.CoreV1().ConfigMaps("").Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - context := &clusterd.Context{Clientset: clientset} - - SetCephCommandsTimeout(context) - assert.Equal(t, 15*time.Second, exec.CephCommandsTimeout) - - exec.CephCommandsTimeout = 0 - cm.Data = map[string]string{"ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS": "0"} - _, err = clientset.CoreV1().ConfigMaps("").Update(ctx, cm, metav1.UpdateOptions{}) - assert.NoError(t, err) - SetCephCommandsTimeout(context) - assert.Equal(t, 15*time.Second, exec.CephCommandsTimeout) - - exec.CephCommandsTimeout = 0 - cm.Data = map[string]string{"ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS": "1"} - _, err = 
clientset.CoreV1().ConfigMaps("").Update(ctx, cm, metav1.UpdateOptions{}) - assert.NoError(t, err) - SetCephCommandsTimeout(context) - assert.Equal(t, 1*time.Second, exec.CephCommandsTimeout) -} diff --git a/pkg/operator/ceph/controller/finalizer.go b/pkg/operator/ceph/controller/finalizer.go deleted file mode 100644 index 69bf79224..000000000 --- a/pkg/operator/ceph/controller/finalizer.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "strings" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/api/meta" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// contains checks if an item exists in a given list. -func contains(list []string, s string) bool { - for _, v := range list { - if v == s { - return true - } - } - - return false -} - -// remove removes any element from a list -func remove(list []string, s string) []string { - for i, v := range list { - if v == s { - list = append(list[:i], list[i+1:]...) - } - } - - return list -} - -// AddFinalizerIfNotPresent adds a finalizer an object to avoid instant deletion -// of the object without finalizing it. -func AddFinalizerIfNotPresent(client client.Client, obj client.Object) error { - objectFinalizer := buildFinalizerName(obj.GetObjectKind().GroupVersionKind().Kind) - - accessor, err := meta.Accessor(obj) - if err != nil { - return errors.Wrap(err, "failed to get meta information of object") - } - - if !contains(accessor.GetFinalizers(), objectFinalizer) { - logger.Infof("adding finalizer %q on %q", objectFinalizer, accessor.GetName()) - accessor.SetFinalizers(append(accessor.GetFinalizers(), objectFinalizer)) - - // Update CR with finalizer - if err := client.Update(context.TODO(), obj); err != nil { - return errors.Wrapf(err, "failed to add finalizer %q on %q", objectFinalizer, accessor.GetName()) - } - } - - return nil -} - -// RemoveFinalizer removes a finalizer from an object -func RemoveFinalizer(client client.Client, obj client.Object) error { - objectFinalizer := buildFinalizerName(obj.GetObjectKind().GroupVersionKind().Kind) - accessor, err := meta.Accessor(obj) - if err != nil { - return errors.Wrap(err, "failed to get meta information of object") - } - - if contains(accessor.GetFinalizers(), objectFinalizer) { - logger.Infof("removing finalizer %q on %q", objectFinalizer, accessor.GetName()) - accessor.SetFinalizers(remove(accessor.GetFinalizers(), objectFinalizer)) - if err := client.Update(context.TODO(), obj); err != nil { - return errors.Wrapf(err, "failed to remove finalizer %q on %q", objectFinalizer, accessor.GetName()) - } - } - - return nil -} - -// buildFinalizerName returns the finalizer name -func buildFinalizerName(kind string) string { - return fmt.Sprintf("%s.%s", strings.ToLower(kind), cephv1.CustomResourceGroup) -} diff --git a/pkg/operator/ceph/controller/finalizer_test.go b/pkg/operator/ceph/controller/finalizer_test.go 
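The finalizer helpers removed just above follow the usual controller-runtime pattern. A minimal sketch of their call sites, assuming this controller package and a client.Object already fetched by the calling reconciler:

package controller

import "sigs.k8s.io/controller-runtime/pkg/client"

// exampleFinalizerHandling is a sketch, not code from the deleted files. While the
// object is live it ensures the "<kind>.ceph.rook.io" finalizer built by
// buildFinalizerName is present; once a deletion timestamp is set and external
// cleanup has finished, it removes the finalizer so Kubernetes can delete the object.
func exampleFinalizerHandling(cl client.Client, obj client.Object) error {
	if obj.GetDeletionTimestamp().IsZero() {
		return AddFinalizerIfNotPresent(cl, obj)
	}
	// resource-specific cleanup would run here before dropping the finalizer
	return RemoveFinalizer(cl, obj)
}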
deleted file mode 100644 index c184b1f83..000000000 --- a/pkg/operator/ceph/controller/finalizer_test.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestAddFinalizerIfNotPresent(t *testing.T) { - fakeObject := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "rook-ceph", - Finalizers: []string{}, - }, - } - - // Objects to track in the fake client. - object := []runtime.Object{ - fakeObject, - } - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, fakeObject) - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - assert.Empty(t, fakeObject.Finalizers) - err := AddFinalizerIfNotPresent(cl, fakeObject) - assert.NoError(t, err) - assert.NotEmpty(t, fakeObject.Finalizers) -} - -func TestRemoveFinalizer(t *testing.T) { - fakeObject := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "rook-ceph", - Finalizers: []string{ - "cephblockpool.ceph.rook.io", - }, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "cephblockpool", - }, - } - - object := []runtime.Object{ - fakeObject, - } - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, fakeObject) - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - assert.NotEmpty(t, fakeObject.Finalizers) - err := RemoveFinalizer(cl, fakeObject) - assert.NoError(t, err) - assert.Empty(t, fakeObject.Finalizers) -} diff --git a/pkg/operator/ceph/controller/handler.go b/pkg/operator/ceph/controller/handler.go deleted file mode 100644 index 813a2abb2..000000000 --- a/pkg/operator/ceph/controller/handler.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/handler" -) - -// ObjectToCRMapper returns the list of a given object type metadata -// It is used to trigger a reconcile object Kind A when watching object Kind B -// So we reconcile Kind A instead of Kind B -// For instance, we watch for CephCluster CR changes but want to reconcile CephFilesystem based on a Spec change -func ObjectToCRMapper(c client.Client, ro runtime.Object, scheme *runtime.Scheme) (handler.MapFunc, error) { - if _, ok := ro.(metav1.ListInterface); !ok { - return nil, errors.Errorf("expected a metav1.ListInterface, got %T instead", ro) - } - - gvk, err := apiutil.GVKForObject(ro, scheme) - if err != nil { - return nil, err - } - - // return handler.EnqueueRequestsFromMapFunc(func(o client.Object) []ctrl.Request { - return handler.MapFunc(func(o client.Object) []ctrl.Request { - list := &unstructured.UnstructuredList{} - list.SetGroupVersionKind(gvk) - err := c.List(context.TODO(), list) - if err != nil { - return nil - } - - results := []ctrl.Request{} - for _, obj := range list.Items { - results = append(results, ctrl.Request{ - NamespacedName: client.ObjectKey{ - Namespace: obj.GetNamespace(), - Name: obj.GetName(), - }, - }) - } - return results - - }), nil -} diff --git a/pkg/operator/ceph/controller/handler_test.go b/pkg/operator/ceph/controller/handler_test.go deleted file mode 100644 index e5b3b1272..000000000 --- a/pkg/operator/ceph/controller/handler_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "reflect" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestObjectToCRMapper(t *testing.T) { - fs := &cephv1.CephFilesystem{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: reflect.TypeOf(cephv1.CephFilesystem{}).Name(), - }, - } - - // Objects to track in the fake client. - objects := []runtime.Object{ - &cephv1.CephFilesystemList{}, - fs, - } - - // Register operator types with the runtime scheme. 
- s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephFilesystemList{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephFilesystem{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() - - // Fake reconcile request - fakeRequest := []ctrl.Request{ - {NamespacedName: client.ObjectKey{Name: "my-pool", Namespace: "rook-ceph"}}, - } - - handlerFunc, err := ObjectToCRMapper(cl, objects[0], s) - assert.NoError(t, err) - assert.ElementsMatch(t, fakeRequest, handlerFunc(fs)) -} diff --git a/pkg/operator/ceph/controller/label.go b/pkg/operator/ceph/controller/label.go deleted file mode 100644 index 360a4b511..000000000 --- a/pkg/operator/ceph/controller/label.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - - "github.com/rook/rook/pkg/operator/ceph/version" - apps "k8s.io/api/apps/v1" - batch "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // CephVersionLabelKey is the key used for reporting the Ceph version which Rook has detected is - // configured for the labeled resource. - CephVersionLabelKey = "ceph-version" -) - -// Add the Ceph version to the given labels. This should *not* be used on pod specifications, -// because this will result in the deployment/daemonset/etc. recreating all of its pods even if an -// update wouldn't otherwise be required. Upgrading unnecessarily increases risk for loss of data -// reliability, even if only briefly. -func addCephVersionLabel(cephVersion version.CephVersion, labels map[string]string) { - // cephVersion.String() returns a string with a space in it, and labels in k8s are limited to - // alphanum characters plus '-', '_', '.' - labels[CephVersionLabelKey] = GetCephVersionLabel(cephVersion) -} - -// GetCephVersionLabel returns a formatted serialization of a provided CephVersion for use in resource labels. -func GetCephVersionLabel(cephVersion version.CephVersion) string { - return fmt.Sprintf("%d.%d.%d-%d", - cephVersion.Major, cephVersion.Minor, cephVersion.Extra, cephVersion.Build) -} - -// ExtractCephVersionFromLabel returns a CephVersion struct deserialized from a provided version label. -func ExtractCephVersionFromLabel(labelVersion string) (*version.CephVersion, error) { - return version.ExtractCephVersion(fmt.Sprintf("ceph version %s", labelVersion)) -} - -// AddCephVersionLabelToDeployment adds a label reporting the Ceph version which Rook has detected is -// running in the Deployment's pods. 
-func AddCephVersionLabelToDeployment(cephVersion version.CephVersion, d *apps.Deployment) { - if d == nil { - return - } - if d.Labels == nil { - d.Labels = map[string]string{} - } - addCephVersionLabel(cephVersion, d.Labels) -} - -// AddCephVersionLabelToDaemonSet adds a label reporting the Ceph version which Rook has detected is -// running in the DaemonSet's pods. -func AddCephVersionLabelToDaemonSet(cephVersion version.CephVersion, d *apps.DaemonSet) { - if d == nil { - return - } - if d.Labels == nil { - d.Labels = map[string]string{} - } - addCephVersionLabel(cephVersion, d.Labels) -} - -// AddCephVersionLabelToJob adds a label reporting the Ceph version which Rook has detected is -// running in the Job's pods. -func AddCephVersionLabelToJob(cephVersion version.CephVersion, j *batch.Job) { - if j == nil { - return - } - if j.Labels == nil { - j.Labels = map[string]string{} - } - addCephVersionLabel(cephVersion, j.Labels) -} - -func AddCephVersionLabelToObjectMeta(cephVersion version.CephVersion, meta *metav1.ObjectMeta) { - if meta.Labels == nil { - meta.Labels = map[string]string{} - } - addCephVersionLabel(cephVersion, meta.Labels) -} diff --git a/pkg/operator/ceph/controller/mirror_peer.go b/pkg/operator/ceph/controller/mirror_peer.go deleted file mode 100644 index d033488e9..000000000 --- a/pkg/operator/ceph/controller/mirror_peer.go +++ /dev/null @@ -1,224 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
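For the label helpers above, a short sketch (assuming this controller package; the version numbers are arbitrary examples) of how a detected Ceph version ends up on a Deployment:

package controller

import (
	"github.com/rook/rook/pkg/operator/ceph/version"
	apps "k8s.io/api/apps/v1"
)

// exampleVersionLabel is a sketch, not code from the deleted files. GetCephVersionLabel
// serializes the version as "16.2.6-0", and AddCephVersionLabelToDeployment stores it
// under the "ceph-version" key on the Deployment's own labels (not the pod template,
// to avoid needlessly restarting pods).
func exampleVersionLabel(d *apps.Deployment) {
	v := version.CephVersion{Major: 16, Minor: 2, Extra: 6, Build: 0}
	AddCephVersionLabelToDeployment(v, d)
}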
-*/ - -// Package controller provides Kubernetes controller/pod/container spec items used for many Ceph daemons -package controller - -import ( - "encoding/base64" - "encoding/json" - "fmt" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - // #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name - poolMirrorBoostrapPeerSecretName = "pool-peer-token" - // #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name - fsMirrorBoostrapPeerSecretName = "fs-peer-token" - // #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name - clusterMirrorBoostrapPeerSecretName = "cluster-peer-token" - // RBDMirrorBootstrapPeerSecretName #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name - RBDMirrorBootstrapPeerSecretName = "rbdMirrorBootstrapPeerSecretName" - // FSMirrorBootstrapPeerSecretName #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name - FSMirrorBootstrapPeerSecretName = "fsMirrorBootstrapPeerSecretName" -) - -func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, object client.Object, ownerInfo *k8sutil.OwnerInfo) (reconcile.Result, error) { - var err error - var ns, name, daemonType string - var boostrapToken []byte - switch objectType := object.(type) { - case *cephv1.CephBlockPool: - ns = objectType.Namespace - name = objectType.Name - daemonType = "rbd" - // Create rbd mirror bootstrap peer token - boostrapToken, err = cephclient.CreateRBDMirrorBootstrapPeer(ctx, clusterInfo, name) - if err != nil { - return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer", daemonType) - } - - // Add additional information to the peer token - boostrapToken, err = expandBootstrapPeerToken(ctx, clusterInfo, boostrapToken) - if err != nil { - return ImmediateRetryResult, errors.Wrap(err, "failed to add extra information to rbd-mirror bootstrap peer") - } - - case *cephv1.CephCluster: - ns = objectType.Namespace - daemonType = "cluster-rbd" - // Create rbd mirror bootstrap peer token - boostrapToken, err = cephclient.CreateRBDMirrorBootstrapPeerWithoutPool(ctx, clusterInfo) - if err != nil { - return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer", daemonType) - } - - // Add additional information to the peer token - boostrapToken, err = expandBootstrapPeerToken(ctx, clusterInfo, boostrapToken) - if err != nil { - return ImmediateRetryResult, errors.Wrap(err, "failed to add extra information to rbd-mirror bootstrap peer") - } - - case *cephv1.CephFilesystem: - ns = objectType.Namespace - name = objectType.Name - daemonType = "cephfs" - boostrapToken, err = cephclient.CreateFSMirrorBootstrapPeer(ctx, clusterInfo, name) - if err != nil { - return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer", daemonType) - } - - default: - return ImmediateRetryResult, errors.Wrap(err, "failed to create bootstrap peer unknown daemon type") - 
} - - // Generate and create a Kubernetes Secret with this token - s := GenerateBootstrapPeerSecret(object, boostrapToken) - - // set ownerref to the Secret - err = ownerInfo.SetControllerReference(s) - if err != nil { - return ImmediateRetryResult, errors.Wrapf(err, "failed to set owner reference for %s-mirror bootstrap peer secret %q", daemonType, s.Name) - } - - // Create Secret - logger.Debugf("store %s-mirror bootstrap token in a Kubernetes Secret %q in namespace %q", daemonType, s.Name, ns) - _, err = k8sutil.CreateOrUpdateSecret(ctx.Clientset, s) - if err != nil && !kerrors.IsAlreadyExists(err) { - return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer %q secret", daemonType, s.Name) - } - - return reconcile.Result{}, nil -} - -// GenerateBootstrapPeerSecret generates a Kubernetes Secret for the mirror bootstrap peer token -func GenerateBootstrapPeerSecret(object client.Object, token []byte) *v1.Secret { - var entityType, entityName, entityNamespace string - - switch objectType := object.(type) { - case *cephv1.CephFilesystem: - entityType = "fs" - entityName = objectType.Name - entityNamespace = objectType.Namespace - case *cephv1.CephBlockPool: - entityType = "pool" - entityName = objectType.Name - entityNamespace = objectType.Namespace - case *cephv1.CephCluster: - entityType = "cluster" - entityName = objectType.Name - entityNamespace = objectType.Namespace - } - - s := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: buildBoostrapPeerSecretName(object), - Namespace: entityNamespace, - }, - Data: map[string][]byte{ - "token": token, - entityType: []byte(entityName), - }, - Type: k8sutil.RookType, - } - - return s -} - -func buildBoostrapPeerSecretName(object client.Object) string { - switch objectType := object.(type) { - case *cephv1.CephFilesystem: - return fmt.Sprintf("%s-%s", fsMirrorBoostrapPeerSecretName, objectType.Name) - case *cephv1.CephBlockPool: - return fmt.Sprintf("%s-%s", poolMirrorBoostrapPeerSecretName, objectType.Name) - case *cephv1.CephCluster: - return fmt.Sprintf("%s-%s", clusterMirrorBoostrapPeerSecretName, objectType.Name) - } - - return "" -} - -func GenerateStatusInfo(object client.Object) map[string]string { - m := make(map[string]string) - - switch object.(type) { - case *cephv1.CephFilesystem: - m[FSMirrorBootstrapPeerSecretName] = buildBoostrapPeerSecretName(object) - case *cephv1.CephBlockPool: - m[RBDMirrorBootstrapPeerSecretName] = buildBoostrapPeerSecretName(object) - } - - return m -} - -func ValidatePeerToken(object client.Object, data map[string][]byte) error { - if len(data) == 0 { - return errors.Errorf("failed to lookup 'data' secret field (empty)") - } - - // Lookup Secret keys and content - keysToTest := []string{"token"} - switch object.(type) { - case *cephv1.CephRBDMirror: - keysToTest = append(keysToTest, "pool") - } - - for _, key := range keysToTest { - k, ok := data[key] - if !ok || len(k) == 0 { - return errors.Errorf("failed to lookup %q key in secret bootstrap peer (missing or empty)", key) - } - } - - return nil -} - -func expandBootstrapPeerToken(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, token []byte) ([]byte, error) { - // First decode the token, it's base64 encoded - decodedToken, err := base64.StdEncoding.DecodeString(string(token)) - if err != nil { - return nil, errors.Wrap(err, "failed to decode bootstrap peer token") - } - - // Unmarshal the decoded value to a Go type - var decodedTokenToGo cephclient.PeerToken - err = json.Unmarshal(decodedToken, 
&decodedTokenToGo) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal decoded token") - } - - decodedTokenToGo.Namespace = clusterInfo.Namespace - - // Marshal the Go type back to JSON - decodedTokenBackToJSON, err := json.Marshal(decodedTokenToGo) - if err != nil { - return nil, errors.Wrap(err, "failed to encode go type back to json") - } - - // Return the base64 encoded token - return []byte(base64.StdEncoding.EncodeToString(decodedTokenBackToJSON)), nil -} diff --git a/pkg/operator/ceph/controller/mirror_peer_test.go b/pkg/operator/ceph/controller/mirror_peer_test.go deleted file mode 100644 index 236b966dd..000000000 --- a/pkg/operator/ceph/controller/mirror_peer_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package controller provides Kubernetes controller/pod/container spec items used for many Ceph daemons -package controller - -import ( - "encoding/base64" - "reflect" - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func TestValidatePeerToken(t *testing.T) { - // Error: map is empty - b := &cephv1.CephRBDMirror{} - data := map[string][]byte{} - err := ValidatePeerToken(b, data) - assert.Error(t, err) - - // Error: map is missing pool and site - data["token"] = []byte("foo") - err = ValidatePeerToken(b, data) - assert.Error(t, err) - - // Error: map is missing pool - data["site"] = []byte("foo") - err = ValidatePeerToken(b, data) - assert.Error(t, err) - - // Success CephRBDMirror - data["pool"] = []byte("foo") - err = ValidatePeerToken(b, data) - assert.NoError(t, err) - - // Success CephFilesystem - // "pool" is not required here - delete(data, "pool") - err = ValidatePeerToken(&cephv1.CephFilesystemMirror{}, data) - assert.NoError(t, err) - - // Success CephFilesystem - err = ValidatePeerToken(&cephv1.CephFilesystemMirror{}, data) - assert.NoError(t, err) -} - -func TestGenerateStatusInfo(t *testing.T) { - type args struct { - object client.Object - } - tests := []struct { - name string - args args - want map[string]string - }{ - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GenerateStatusInfo(tt.args.object); !reflect.DeepEqual(got, tt.want) { - t.Errorf("GenerateStatusInfo() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestExpandBootstrapPeerToken(t *testing.T) { - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if reflect.DeepEqual(args[0:5], []string{"osd", "pool", "get", "pool", "all"}) { - return `{"pool_id":13}`, nil - } - - return "", errors.Errorf("unknown command args: %s", args[0:5]) - }, - } - c := &clusterd.Context{ - Executor: executor, - } - - newToken, err := expandBootstrapPeerToken(c, cephclient.AdminClusterInfo("mu-cluster"), []byte(`eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==`)) - assert.NoError(t, err) - newTokenDecoded, err := base64.StdEncoding.DecodeString(string(newToken)) - assert.NoError(t, err) - assert.Contains(t, string(newTokenDecoded), "namespace") -} diff --git a/pkg/operator/ceph/controller/object_operations.go b/pkg/operator/ceph/controller/object_operations.go deleted file mode 100644 index 668841657..000000000 --- a/pkg/operator/ceph/controller/object_operations.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "reflect" - - "k8s.io/apimachinery/pkg/api/meta" - - "github.com/pkg/errors" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// CreateOrUpdateObject updates an object with a given status -func CreateOrUpdateObject(client client.Client, obj client.Object) error { - accessor, err := meta.Accessor(obj) - if err != nil { - return errors.Wrap(err, "failed to get meta information of object") - } - objName := accessor.GetName() - - // Somehow meta.TypeAccessor returns an empty string for the type name so using reflection instead - objType := reflect.TypeOf(obj) - - err = client.Create(context.TODO(), obj) - if err != nil { - if kerrors.IsAlreadyExists(err) { - err = client.Update(context.TODO(), obj) - if err != nil { - return errors.Wrapf(err, "failed to update ceph %q object %q", objType, objName) - } - - logger.Infof("updated ceph %q object %q", objType, objName) - return nil - } - return errors.Wrapf(err, "failed to create ceph %v object %q", objType, objName) - } - - logger.Infof("created ceph %v object %q", objType, objName) - return nil -} diff --git a/pkg/operator/ceph/controller/owner.go b/pkg/operator/ceph/controller/owner.go deleted file mode 100644 index b9c07db3e..000000000 --- a/pkg/operator/ceph/controller/owner.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// OwnerMatcher is a struct representing the controller owner reference -// to use for comparison with child objects -type OwnerMatcher struct { - owner runtime.Object - ownerMeta metav1.Object - ownerTypeGroupKind schema.GroupKind - scheme *runtime.Scheme -} - -// NewOwnerReferenceMatcher initializes a new owner reference matcher -func NewOwnerReferenceMatcher(owner runtime.Object, scheme *runtime.Scheme) (*OwnerMatcher, error) { - m := &OwnerMatcher{ - owner: owner, - scheme: scheme, - } - - meta, _ := meta.Accessor(owner) - m.ownerMeta = meta - if err := m.setOwnerTypeGroupKind(); err != nil { - return m, errors.Wrap(err, "failed to set ownerType %v") - } - - return m, nil -} - -// Match checks whether a given object matches the parent controller owner reference -// It is used in the predicate functions for non-CRD objects to ensure we only watch resources -// that have the parent Kind in its owner reference AND the same UID -// -// So we won't reconcile other object is we have multiple CRs -// -// For example, for CephObjectStore we will only watch "secrets" that have an owner reference -// referencing the 'CephObjectStore' Kind -func (e *OwnerMatcher) Match(object runtime.Object) (bool, metav1.Object, error) { - o, err := meta.Accessor(object) - if err != nil { - return false, o, errors.Wrapf(err, "could not access object meta kind %q", object.GetObjectKind()) - } - - // Iterate over owner reference of the child object - for _, owner := range e.getOwnersReferences(o) { - groupVersion, err := schema.ParseGroupVersion(owner.APIVersion) - if err != nil { - return false, o, errors.Wrapf(err, "could not parse api version %q", owner.APIVersion) - } - - if (e.ownerMeta.GetUID() == "" || (e.ownerMeta.GetUID() != "" && owner.UID == e.ownerMeta.GetUID())) && owner.Kind == e.ownerTypeGroupKind.Kind && groupVersion.Group == e.ownerTypeGroupKind.Group { - return true, o, nil - } - } - - return false, o, nil -} - -func (e *OwnerMatcher) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { - if object == nil { - return nil - } - ownerRef := metav1.GetControllerOf(object) - if ownerRef != nil { - return []metav1.OwnerReference{*ownerRef} - } - - return nil -} - -func (e *OwnerMatcher) setOwnerTypeGroupKind() error { - kinds, _, err := e.scheme.ObjectKinds(e.owner) - if err != nil || len(kinds) < 1 { - return errors.Wrapf(err, "could not get object kinds %v", e.owner) - } - - e.ownerTypeGroupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} - return nil -} diff --git a/pkg/operator/ceph/controller/owner_test.go b/pkg/operator/ceph/controller/owner_test.go deleted file mode 100644 index 8f1a9f880..000000000 --- a/pkg/operator/ceph/controller/owner_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - "reflect" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestMatch(t *testing.T) { - isController := true - - // Setup controller object - fakeObject := &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "rook-ceph", - UID: "ce6807a0-7270-4874-9e9f-ae493d48b814", - Name: "my-store", - }, - TypeMeta: metav1.TypeMeta{ - Kind: reflect.TypeOf(cephv1.CephObjectStore{}).Name(), - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), - }, - } - - // Setup child object - fakeChildObject := &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "rgw-secret", - Namespace: "rook-ceph", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "ceph.rook.io/v1", - Kind: "wrong kind", - Name: "my-store", - UID: "wrong-uid", - Controller: &isController, - }, - }, - }, - } - - // Setup scheme - scheme := scheme.Scheme - scheme.AddKnownTypes(cephv1.SchemeGroupVersion, fakeObject) - - // Wrong Kind - ownerMatcher, err := NewOwnerReferenceMatcher(fakeObject, scheme) - assert.NoError(t, err) - match, _, err := ownerMatcher.Match(fakeChildObject) - assert.NoError(t, err) - assert.False(t, match) - - // Good Kind but wrong UID - fakeChildObject.OwnerReferences[0].Kind = "CephObjectStore" - match, _, err = ownerMatcher.Match(fakeChildObject) - assert.NoError(t, err) - assert.False(t, match) - - // Good Kind AND good UID - fakeChildObject.OwnerReferences[0].UID = "ce6807a0-7270-4874-9e9f-ae493d48b814" - match, _, err = ownerMatcher.Match(fakeChildObject) - assert.NoError(t, err) - assert.True(t, match) -} diff --git a/pkg/operator/ceph/controller/predicate.go b/pkg/operator/ceph/controller/predicate.go deleted file mode 100644 index 2fe85c5df..000000000 --- a/pkg/operator/ceph/controller/predicate.go +++ /dev/null @@ -1,677 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "encoding/json" - "strings" - - "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" -) - -const ( - cephVersionLabelKey = "ceph_version" - DoNotReconcileLabelName = "do_not_reconcile" -) - -// WatchControllerPredicate is a special update filter for update events -// do not reconcile if the the status changes, this avoids a reconcile storm loop -// -// returning 'true' means triggering a reconciliation -// returning 'false' means do NOT trigger a reconciliation -func WatchControllerPredicate() predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - logger.Debug("create event from a CR") - return true - }, - DeleteFunc: func(e event.DeleteEvent) bool { - logger.Debug("delete event from a CR") - return true - }, - UpdateFunc: func(e event.UpdateEvent) bool { - logger.Debug("update event from a CR") - // resource.Quantity has non-exportable fields, so we use its comparator method - resourceQtyComparer := cmp.Comparer(func(x, y resource.Quantity) bool { return x.Cmp(y) == 0 }) - - switch objOld := e.ObjectOld.(type) { - case *cephv1.CephObjectStore: - objNew := e.ObjectNew.(*cephv1.CephObjectStore) - logger.Debug("update event on CephObjectStore CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - // Handling upgrades - isUpgrade := isUpgrade(objOld.GetLabels(), objNew.GetLabels()) - if isUpgrade { - return true - } - - case *cephv1.CephObjectStoreUser: - objNew := e.ObjectNew.(*cephv1.CephObjectStoreUser) - logger.Debug("update event on CephObjectStoreUser CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. 
diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - - case *cephv1.CephObjectRealm: - objNew := e.ObjectNew.(*cephv1.CephObjectRealm) - logger.Debug("update event on CephObjectRealm") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - - case *cephv1.CephObjectZoneGroup: - objNew := e.ObjectNew.(*cephv1.CephObjectZoneGroup) - logger.Debug("update event on CephObjectZoneGroup") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - - case *cephv1.CephObjectZone: - objNew := e.ObjectNew.(*cephv1.CephObjectZone) - logger.Debug("update event on CephObjectZone") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - - case *cephv1.CephBlockPool: - objNew := e.ObjectNew.(*cephv1.CephBlockPool) - logger.Debug("update event on CephBlockPool CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. 
diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - - case *cephv1.CephFilesystem: - objNew := e.ObjectNew.(*cephv1.CephFilesystem) - logger.Debug("update event on CephFilesystem CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - // Handling upgrades - isUpgrade := isUpgrade(objOld.GetLabels(), objNew.GetLabels()) - if isUpgrade { - return true - } - - case *cephv1.CephNFS: - objNew := e.ObjectNew.(*cephv1.CephNFS) - logger.Debug("update event on CephNFS CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - // Handling upgrades - isUpgrade := isUpgrade(objOld.GetLabels(), objNew.GetLabels()) - if isUpgrade { - return true - } - - case *cephv1.CephRBDMirror: - objNew := e.ObjectNew.(*cephv1.CephRBDMirror) - logger.Debug("update event on CephRBDMirror CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. 
diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - // Handling upgrades - isUpgrade := isUpgrade(objOld.GetLabels(), objNew.GetLabels()) - if isUpgrade { - return true - } - - case *cephv1.CephClient: - objNew := e.ObjectNew.(*cephv1.CephClient) - logger.Debug("update event on CephClient CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - // Handling upgrades - isUpgrade := isUpgrade(objOld.GetLabels(), objNew.GetLabels()) - if isUpgrade { - return true - } - - case *cephv1.CephFilesystemMirror: - objNew := e.ObjectNew.(*cephv1.CephFilesystemMirror) - logger.Debug("update event on CephFilesystemMirror CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - logger.Infof("CR has changed for %q. 
diff=%s", objNew.Name, diff) - return true - } else if objectToBeDeleted(objOld, objNew) { - logger.Debugf("CR %q is going be deleted", objNew.Name) - return true - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - // Handling upgrades - isUpgrade := isUpgrade(objOld.GetLabels(), objNew.GetLabels()) - if isUpgrade { - return true - } - } - - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} - -// WatchCephClusterPredicate is a predicate used by child controllers such as Filesystem or Object -// It watch for CR changes on the CephCluster object and reconciles if this needs to be propagated -// For instance the logCollector option from the CephCluster spec affects the configuration of rgw pods -// So if it changes we must update the deployment -func WatchCephClusterPredicate() predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - logger.Debug("create event from a CR") - return true - }, - DeleteFunc: func(e event.DeleteEvent) bool { - logger.Debug("delete event from a CR") - return true - }, - UpdateFunc: func(e event.UpdateEvent) bool { - // resource.Quantity has non-exportable fields, so we use its comparator method - resourceQtyComparer := cmp.Comparer(func(x, y resource.Quantity) bool { return x.Cmp(y) == 0 }) - - switch objOld := e.ObjectOld.(type) { - case *cephv1.CephCluster: - objNew := e.ObjectNew.(*cephv1.CephCluster) - logger.Debug("update event on CephCluster CR") - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - isDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) - if isDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objNew.Name) - return false - } - diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) - if diff != "" { - // The image change (upgrade) is being taking care by watchControllerPredicate() in the cluster package - if objOld.Spec.CephVersion.Image != objNew.Spec.CephVersion.Image { - return false - } - // If the log collector setting changes let's reconcile the child controllers - if !cmp.Equal(objOld.Spec.LogCollector, objNew.Spec.LogCollector) { - logger.Debug("log collector option changed, reconciling") - return true - } - } else if objOld.GetGeneration() != objNew.GetGeneration() { - logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) - } - } - - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} - -func objectToBeDeleted(oldObj, newObj client.Object) bool { - return !oldObj.GetDeletionTimestamp().Equal(newObj.GetDeletionTimestamp()) -} - -// objectChanged checks whether the object has been updated -func objectChanged(oldObj, newObj runtime.Object, objectName string) (bool, error) { - var doReconcile bool - old := oldObj.DeepCopyObject() - new := newObj.DeepCopyObject() - - // Set resource version - accessor := meta.NewAccessor() - currentResourceVersion, err := accessor.ResourceVersion(old) - if err == nil { - if err := accessor.SetResourceVersion(new, currentResourceVersion); err != nil { - return false, errors.Wrapf(err, "failed to set resource version to %s", currentResourceVersion) - } - } else { - return false, errors.Wrap(err, "failed to query current resource version") - } - - // Calculate diff between old and new object - diff, err := patch.DefaultPatchMaker.Calculate(old, 
new) - if err != nil { - doReconcile = true - return doReconcile, errors.Wrap(err, "failed to calculate object diff but let's reconcile just in case") - } else if diff.IsEmpty() { - logger.Debugf("object %q diff is empty, nothing to reconcile", objectName) - return doReconcile, nil - } - - // Do not leak details of diff if the object contains sensitive data (e.g., it is a Secret) - isSensitive := false - if _, ok := new.(*corev1.Secret); ok { - logger.Debugf("object %q diff is [redacted for Secrets]", objectName) - isSensitive = true - } else { - logger.Debugf("object %q diff is %s", objectName, diff.String()) - isSensitive = false - } - - return isValidEvent(diff.Patch, objectName, isSensitive), nil -} - -// WatchPredicateForNonCRDObject is a special filter for create events -// It only applies to non-CRD objects, meaning, for instance a cephv1.CephBlockPool{} -// object will not have this filter -// Only for objects like &v1.Secret{} etc... -// -// We return 'false' on a create event so we don't overstep with the main watcher on cephv1.CephBlockPool{} -// This avoids a double reconcile when the secret gets deleted. -func WatchPredicateForNonCRDObject(owner runtime.Object, scheme *runtime.Scheme) predicate.Funcs { - // Initialize the Owner Matcher, which is the main controller object: e.g. cephv1.CephBlockPool{} - ownerMatcher, err := NewOwnerReferenceMatcher(owner, scheme) - if err != nil { - logger.Errorf("failed to initialize owner matcher. %v", err) - } - - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - - DeleteFunc: func(e event.DeleteEvent) bool { - match, object, err := ownerMatcher.Match(e.Object) - if err != nil { - logger.Errorf("failed to check if object kind %q matched. %v", e.Object.GetObjectKind(), err) - } - objectName := object.GetName() - if match { - // If the resource is a CM, we might want to ignore it since some of them are ephemeral - isCMToIgnoreOnDelete := isCMToIgnoreOnDelete(e.Object) - if isCMToIgnoreOnDelete { - return false - } - - // If the resource is a canary deployment we don't reconcile because it's ephemeral - isCanary := isCanary(e.Object) - if isCanary { - return false - } - - logger.Infof("object %q matched on delete, reconciling", objectName) - return true - } - - logger.Debugf("object %q did not match on delete", objectName) - return false - }, - - UpdateFunc: func(e event.UpdateEvent) bool { - match, object, err := ownerMatcher.Match(e.ObjectNew) - if err != nil { - logger.Errorf("failed to check if object matched. 
%v", err) - } - objectName := object.GetName() - if match { - // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request - IsDoNotReconcile := IsDoNotReconcile(object.GetLabels()) - if IsDoNotReconcile { - logger.Debugf("object %q matched on update but %q label is set, doing nothing", DoNotReconcileLabelName, objectName) - return false - } - - logger.Debugf("object %q matched on update", objectName) - - // CONFIGMAP WHITELIST - // Only reconcile on rook-config-override CM changes - isCMTConfigOverride := isCMTConfigOverride(e.ObjectNew) - if isCMTConfigOverride { - logger.Debugf("do reconcile when the cm is %s", k8sutil.ConfigOverrideName) - return true - } - - // If the resource is a ConfigMap we don't reconcile - _, ok := e.ObjectNew.(*corev1.ConfigMap) - if ok { - logger.Debugf("do not reconcile on configmap that is not %q", k8sutil.ConfigOverrideName) - return false - } - - // SECRETS BLACKLIST - // If the resource is a Secret, we might want to ignore it - // We want to reconcile Secrets in case their content gets altered - isSecretToIgnoreOnUpdate := isSecretToIgnoreOnUpdate(e.ObjectNew) - if isSecretToIgnoreOnUpdate { - return false - } - - // If the resource is a deployment we don't reconcile - _, ok = e.ObjectNew.(*appsv1.Deployment) - if ok { - logger.Debug("do not reconcile deployments updates") - return false - } - - // did the object change? - objectChanged, err := objectChanged(e.ObjectOld, e.ObjectNew, objectName) - if err != nil { - logger.Errorf("failed to check if object %q changed. %v", objectName, err) - } - - return objectChanged - } - - return false - }, - - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - } -} - -// isValidEvent analyses the diff between two objects events and determines if we should reconcile -// that event or not. The goal is to avoid double-reconcile as much as possible. -// If the patch could contain sensitive data, isValidEvent will not leak the data to logs. -func isValidEvent(patch []byte, objectName string, patchContainsSensitiveData bool) bool { - var p map[string]interface{} - err := json.Unmarshal(patch, &p) - if err != nil { - logErrorUnlessSensitive("failed to unmarshal patch", err, patchContainsSensitiveData) - return false - } - if !patchContainsSensitiveData { - logger.Debugf("patch before trimming is %s", string(patch)) - } - - // don't reconcile on status update on an object (e.g. status "creating") - logger.Debugf("trimming 'status' field from patch") - delete(p, "status") - - // Do not reconcile on metadata change since managedFields are often updated by the server - logger.Debugf("trimming 'metadata' field from patch") - delete(p, "metadata") - - // If the patch is now empty, we don't reconcile - if len(p) == 0 { - logger.Debug("patch is empty after trimming") - return false - } - - // Re-marshal to get the last diff - patch, err = json.Marshal(p) - if err != nil { - logErrorUnlessSensitive("failed to marshal patch", err, patchContainsSensitiveData) - return false - } - - // If after all the filtering there is still something in the patch, we reconcile - text := string(patch) - if patchContainsSensitiveData { - text = "[redacted patch details due to potentially sensitive content]" - } - logger.Infof("controller will reconcile resource %q based on patch: %s", objectName, text) - - return true -} - -func logErrorUnlessSensitive(msg string, err error, isSensitive bool) { - if isSensitive { - logger.Errorf("%s. 
[redacted error due to potentially sensitive content]", msg) - } else { - logger.Errorf("%s. %v", msg, err) - } -} - -func isUpgrade(oldLabels, newLabels map[string]string) bool { - oldLabelVal, oldLabelKeyExist := oldLabels[cephVersionLabelKey] - newLabelVal, newLabelKeyExist := newLabels[cephVersionLabelKey] - - // Nothing exists - if !oldLabelKeyExist && !newLabelKeyExist { - return false - } - - // The new object has the label key so we reconcile - if !oldLabelKeyExist && newLabelKeyExist { - return true - } - - // Both objects have the label and values are different so we reconcile - if (oldLabelKeyExist && newLabelKeyExist) && oldLabelVal != newLabelVal { - return true - } - - return false -} - -func isCanary(obj runtime.Object) bool { - // If not a deployment, let's not reconcile - d, ok := obj.(*appsv1.Deployment) - if !ok { - return false - } - - // Get the labels - labels := d.GetLabels() - - labelVal, labelKeyExist := labels["mon_canary"] - if labelKeyExist && labelVal == "true" { - logger.Debugf("do not reconcile %q on monitor canary deployments", d.Name) - return true - } - - return false -} - -func isCMTConfigOverride(obj runtime.Object) bool { - // If not a ConfigMap, let's not reconcile - cm, ok := obj.(*corev1.ConfigMap) - if !ok { - return false - } - - objectName := cm.GetName() - return objectName == k8sutil.ConfigOverrideName -} - -func isCMToIgnoreOnDelete(obj runtime.Object) bool { - // If not a ConfigMap, let's not reconcile - cm, ok := obj.(*corev1.ConfigMap) - if !ok { - return false - } - - objectName := cm.GetName() - // is it the object the temporarily osd config map? - if strings.HasPrefix(objectName, "rook-ceph-osd-") && strings.HasSuffix(objectName, "-status") { - logger.Debugf("do not reconcile on %q config map changes", objectName) - return true - } - - return false -} - -func isSecretToIgnoreOnUpdate(obj runtime.Object) bool { - s, ok := obj.(*corev1.Secret) - if !ok { - return false - } - - objectName := s.GetName() - switch objectName { - case config.StoreName: - logger.Debugf("do not reconcile on %q secret changes", objectName) - return true - } - - return false -} - -func IsDoNotReconcile(labels map[string]string) bool { - value, ok := labels[DoNotReconcileLabelName] - - // Nothing exists - if ok && value == "true" { - return true - } - - return false -} diff --git a/pkg/operator/ceph/controller/predicate_test.go b/pkg/operator/ceph/controller/predicate_test.go deleted file mode 100644 index eb1a5ab4f..000000000 --- a/pkg/operator/ceph/controller/predicate_test.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "fmt" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - name = "my-pool" - namespace = "rook-ceph" - oldReplicas uint = 3 - newReplicas uint = 2 -) - -func TestObjectChanged(t *testing.T) { - - oldObject := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.PoolSpec{ - Replicated: cephv1.ReplicatedSpec{ - Size: oldReplicas, - }, - }, - Status: &cephv1.CephBlockPoolStatus{ - Phase: "", - }, - } - - newObject := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.PoolSpec{ - Replicated: cephv1.ReplicatedSpec{ - Size: oldReplicas, - }, - }, - Status: &cephv1.CephBlockPoolStatus{ - Phase: "", - }, - } - - // Identical - changed, err := objectChanged(oldObject, newObject, "foo") - assert.NoError(t, err) - assert.False(t, changed) - - // Replica size changed - oldObject.Spec.Replicated.Size = newReplicas - changed, err = objectChanged(oldObject, newObject, "foo") - assert.NoError(t, err) - assert.True(t, changed) -} - -func TestIsUpgrade(t *testing.T) { - oldLabel := make(map[string]string) - newLabel := map[string]string{ - "foo": "bar", - } - - // no value do nothing - b := isUpgrade(oldLabel, newLabel) - assert.False(t, b) - - // different value do something - newLabel["ceph_version"] = "15.2.0-octopus" - b = isUpgrade(oldLabel, newLabel) - assert.True(t, b, fmt.Sprintf("%v,%v", oldLabel, newLabel)) - - // same value do nothing - oldLabel["ceph_version"] = "15.2.0-octopus" - newLabel["ceph_version"] = "15.2.0-octopus" - b = isUpgrade(oldLabel, newLabel) - assert.False(t, b, fmt.Sprintf("%v,%v", oldLabel, newLabel)) - - // different value do something - newLabel["ceph_version"] = "15.2.1-octopus" - b = isUpgrade(oldLabel, newLabel) - assert.True(t, b, fmt.Sprintf("%v,%v", oldLabel, newLabel)) -} - -func TestIsValidEvent(t *testing.T) { - obj := "rook-ceph-mon-a" - valid := []byte(`{ - "metadata": {}, - "spec": {}, - "status": { - "conditions": [ - { - "message": "ReplicaSet \"rook-ceph-mon-b-784fc58bf8\" is progressing.", - "reason": "ReplicaSetUpdated", - "type": "Progressing" - } - ] - } - }`) - - b := isValidEvent(valid, obj, false) - assert.True(t, b) - - valid = []byte(`{"foo": "bar"}`) - b = isValidEvent(valid, obj, false) - assert.True(t, b) - - invalid := []byte(`{ - "metadata": {}, - "status": {}, - }`) - b = isValidEvent(invalid, obj, false) - assert.False(t, b) -} - -func TestIsCanary(t *testing.T) { - blockPool := &cephv1.CephBlockPool{} - - assert.False(t, isCanary(blockPool)) - - d := &appsv1.Deployment{} - assert.False(t, isCanary(d)) - - d.Labels = map[string]string{ - "foo": "bar", - } - assert.False(t, isCanary(d)) - - d.Labels["mon_canary"] = "true" - assert.True(t, isCanary(d)) -} - -func TestIsCMToIgnoreOnUpdate(t *testing.T) { - blockPool := &cephv1.CephBlockPool{} - assert.False(t, isCMTConfigOverride(blockPool)) - - cm := &corev1.ConfigMap{} - assert.False(t, isCMTConfigOverride(cm)) - - cm.Name = "rook-ceph-mon-endpoints" - assert.False(t, isCMTConfigOverride(cm)) - - cm.Name = "rook-config-override" - assert.True(t, isCMTConfigOverride(cm)) -} - -func TestIsCMToIgnoreOnDelete(t *testing.T) { - blockPool := &cephv1.CephBlockPool{} - assert.False(t, 
isCMToIgnoreOnDelete(blockPool)) - - cm := &corev1.ConfigMap{} - assert.False(t, isCMToIgnoreOnDelete(cm)) - - cm.Name = "rook-ceph-mon-endpoints" - assert.False(t, isCMToIgnoreOnDelete(cm)) - - cm.Name = "rook-ceph-osd-minikube-status" - assert.True(t, isCMToIgnoreOnDelete(cm)) -} - -func TestIsSecretToIgnoreOnUpdate(t *testing.T) { - blockPool := &cephv1.CephBlockPool{} - assert.False(t, isSecretToIgnoreOnUpdate(blockPool)) - - s := &corev1.Secret{} - assert.False(t, isSecretToIgnoreOnUpdate(s)) - - s.Name = "foo" - assert.False(t, isSecretToIgnoreOnUpdate(s)) - - s.Name = config.StoreName - assert.True(t, isSecretToIgnoreOnUpdate(s)) -} - -func TestIsDoNotReconcile(t *testing.T) { - l := map[string]string{ - "foo": "bar", - } - - // value not present - b := IsDoNotReconcile(l) - assert.False(t, b) - - // good value wrong content - l["do_not_reconcile"] = "false" - b = IsDoNotReconcile(l) - assert.False(t, b) - - // good value and good content - l["do_not_reconcile"] = "true" - b = IsDoNotReconcile(l) - assert.True(t, b) -} diff --git a/pkg/operator/ceph/controller/spec.go b/pkg/operator/ceph/controller/spec.go deleted file mode 100644 index e5baa2f1c..000000000 --- a/pkg/operator/ceph/controller/spec.go +++ /dev/null @@ -1,721 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package controller provides Kubernetes controller/pod/container spec items used for many Ceph daemons -package controller - -import ( - "context" - "fmt" - "os" - "path" - "reflect" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/display" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // ConfigInitContainerName is the name which is given to the config initialization container - // in all Ceph pods. 
- ConfigInitContainerName = "config-init" - logVolumeName = "rook-ceph-log" - volumeMountSubPath = "data" - crashVolumeName = "rook-ceph-crash" - daemonSocketDir = "/run/ceph" - initialDelaySecondsNonOSDDaemon int32 = 10 - initialDelaySecondsOSDDaemon int32 = 45 - logCollector = "log-collector" - DaemonIDLabel = "ceph_daemon_id" - daemonTypeLabel = "ceph_daemon_type" - ExternalMgrAppName = "rook-ceph-mgr-external" - ServiceExternalMetricName = "http-external-metrics" -) - -type daemonConfig struct { - daemonType string - daemonID string -} - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "ceph-spec") - -var ( - cronLogRotate = ` -set -xe - -CEPH_CLIENT_ID=%s -PERIODICITY=%s -LOG_ROTATE_CEPH_FILE=/etc/logrotate.d/ceph - -if [ -z "$PERIODICITY" ]; then - PERIODICITY=24h -fi - -# edit the logrotate file to only rotate a specific daemon log -# otherwise we will logrotate log files without reloading certain daemons -# this might happen when multiple daemons run on the same machine -sed -i "s|*.log|$CEPH_CLIENT_ID.log|" "$LOG_ROTATE_CEPH_FILE" - -while true; do - sleep "$PERIODICITY" - echo "starting log rotation" - logrotate --verbose --force "$LOG_ROTATE_CEPH_FILE" - echo "I am going to sleep now, see you in $PERIODICITY" -done -` -) - -// return the volume and matching volume mount for mounting the config override ConfigMap into -// containers as "/etc/ceph/ceph.conf". -func configOverrideConfigMapVolumeAndMount() (v1.Volume, v1.VolumeMount) { - secretAndConfigMapVolumeProjections := []v1.VolumeProjection{} - name := k8sutil.ConfigOverrideName // configmap name and name of volume - dir := config.EtcCephDir - file := "ceph.conf" - // TL;DR: mount the configmap's "config" to a file called "ceph.conf" with 0444 permissions - // security: allow to be read by everyone since now ceph processes run as 'ceph' and not 'root' user - // Further investigation needs to be done to copy the ceph.conf and change its ownership - // since configuring a owner of a ConfigMap secret is currently impossible - // This also works around the following issue: https://tracker.ceph.com/issues/38606 - // - // This design choice avoids the crash/restart situation in Rook - // If we don't set 0444 to the ceph.conf configuration file during its respawn (with exec) the ceph-mgr - // won't be able to read the ceph.conf and the container will die, the "restart" count will increase in k8s - // This will mislead users thinking something won't wrong but that a false positive - mode := int32(0444) - projectionConfigMap := &v1.ConfigMapProjection{Items: []v1.KeyToPath{{Key: k8sutil.ConfigOverrideVal, Path: file, Mode: &mode}}} - projectionConfigMap.Name = name - configMapProjection := v1.VolumeProjection{ - ConfigMap: projectionConfigMap, - } - secretAndConfigMapVolumeProjections = append(secretAndConfigMapVolumeProjections, configMapProjection) - - v := v1.Volume{ - Name: name, - VolumeSource: v1.VolumeSource{ - Projected: &v1.ProjectedVolumeSource{ - Sources: secretAndConfigMapVolumeProjections, - }, - }, - } - - // configmap's "config" to "/etc/ceph/ceph.conf" - m := v1.VolumeMount{ - Name: name, - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: dir, - } - - return v, m -} - -// ConfGeneratedInPodVolumeAndMount generate an empty dir of /etc/ceph -func ConfGeneratedInPodVolumeAndMount() (v1.Volume, v1.VolumeMount) { - name := "ceph-conf-emptydir" - dir := config.EtcCephDir - v := v1.Volume{Name: name, VolumeSource: v1.VolumeSource{ - EmptyDir: 
&v1.EmptyDirVolumeSource{}}} - // configmap's "config" to "/etc/ceph/ceph.conf" - m := v1.VolumeMount{ - Name: name, - MountPath: dir, - } - return v, m -} - -// PodVolumes fills in the volumes parameter with the common list of Kubernetes volumes for use in Ceph pods. -// This function is only used for OSDs. -func PodVolumes(dataPaths *config.DataPathMap, dataDirHostPath string, confGeneratedInPod bool) []v1.Volume { - - dataDirSource := v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}} - if dataDirHostPath != "" { - dataDirSource = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataDirHostPath}} - } - configVolume, _ := configOverrideConfigMapVolumeAndMount() - if confGeneratedInPod { - configVolume, _ = ConfGeneratedInPodVolumeAndMount() - } - - v := []v1.Volume{ - {Name: k8sutil.DataDirVolume, VolumeSource: dataDirSource}, - configVolume, - } - v = append(v, StoredLogAndCrashVolume(dataPaths.HostLogDir(), dataPaths.HostCrashDir())...) - - return v -} - -// CephVolumeMounts returns the common list of Kubernetes volume mounts for Ceph containers. -// This function is only used for OSDs. -func CephVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount { - _, configMount := configOverrideConfigMapVolumeAndMount() - if confGeneratedInPod { - _, configMount = ConfGeneratedInPodVolumeAndMount() - } - - v := []v1.VolumeMount{ - {Name: k8sutil.DataDirVolume, MountPath: k8sutil.DataDir}, - configMount, - // Rook doesn't run in ceph containers, so it doesn't need the config override mounted - } - v = append(v, StoredLogAndCrashVolumeMount(dataPaths.ContainerLogDir(), dataPaths.ContainerCrashDir())...) - - return v -} - -// RookVolumeMounts returns the common list of Kubernetes volume mounts for Rook containers. -// This function is only used by OSDs. -func RookVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount { - return CephVolumeMounts(dataPaths, confGeneratedInPod) - -} - -// DaemonVolumesBase returns the common / static set of volumes. -func DaemonVolumesBase(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume { - configOverrideVolume, _ := configOverrideConfigMapVolumeAndMount() - vols := []v1.Volume{ - configOverrideVolume, - } - if keyringResourceName != "" { - vols = append(vols, keyring.Volume().Resource(keyringResourceName)) - } - if dataPaths.HostLogAndCrashDir != "" { - // logs are not persisted to host - vols = append(vols, StoredLogAndCrashVolume(dataPaths.HostLogDir(), dataPaths.HostCrashDir())...) - } - return vols -} - -// DaemonVolumesDataPVC returns a PVC volume source for daemon container data. -func DaemonVolumesDataPVC(pvcName string) v1.Volume { - return v1.Volume{ - Name: "ceph-daemon-data", - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvcName, - }, - }, - } -} - -// DaemonVolumesDataHostPath returns HostPath volume source for daemon container -// data. 
-func DaemonVolumesDataHostPath(dataPaths *config.DataPathMap) []v1.Volume { - vols := []v1.Volume{} - if dataPaths.ContainerDataDir == "" { - // no data is stored in container, and therefore no data can be persisted to host - return vols - } - // when data is not persisted to host, the data may still be shared between init/run containers - src := v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}} - if dataPaths.HostDataDir != "" { - // data is persisted to host - src = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataPaths.HostDataDir}} - } - return append(vols, v1.Volume{Name: "ceph-daemon-data", VolumeSource: src}) -} - -// DaemonVolumesContainsPVC returns true if a volume exists with a volume source -// configured with a persistent volume claim. -func DaemonVolumesContainsPVC(volumes []v1.Volume) bool { - for _, volume := range volumes { - if volume.VolumeSource.PersistentVolumeClaim != nil { - return true - } - } - return false -} - -// DaemonVolumes returns the pod volumes used by all Ceph daemons. If keyring resource name is -// empty, there will be no keyring volume created from a secret. -func DaemonVolumes(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume { - vols := DaemonVolumesBase(dataPaths, keyringResourceName) - vols = append(vols, DaemonVolumesDataHostPath(dataPaths)...) - return vols -} - -// DaemonVolumeMounts returns volume mounts which correspond to the DaemonVolumes. These -// volume mounts are shared by most all Ceph daemon containers, both init and standard. If keyring -// resource name is empty, there will be no keyring mounted in the container. -func DaemonVolumeMounts(dataPaths *config.DataPathMap, keyringResourceName string) []v1.VolumeMount { - _, configOverrideMount := configOverrideConfigMapVolumeAndMount() - mounts := []v1.VolumeMount{ - configOverrideMount, - } - if keyringResourceName != "" { - mounts = append(mounts, keyring.VolumeMount().Resource(keyringResourceName)) - } - if dataPaths.HostLogAndCrashDir != "" { - // logs are not persisted to host, so no mount is needed - mounts = append(mounts, StoredLogAndCrashVolumeMount(dataPaths.ContainerLogDir(), dataPaths.ContainerCrashDir())...) - } - if dataPaths.ContainerDataDir == "" { - // no data is stored in container, so there are no more mounts - return mounts - } - return append(mounts, - v1.VolumeMount{Name: "ceph-daemon-data", MountPath: dataPaths.ContainerDataDir}, - ) -} - -// see AddVolumeMountSubPath -func addVolumeMountSubPathContainer(c *v1.Container, volumeMountName string) { - for i := range c.VolumeMounts { - v := &c.VolumeMounts[i] - if v.Name == volumeMountName { - v.SubPath = volumeMountSubPath - } - } -} - -// AddVolumeMountSubPath updates each init and regular container of the podspec -// such that each volume mount attached to a container is mounted under a -// subpath in the source volume. This is important because some daemons may not -// start if the volume mount directory is non-empty. When the volume is the root -// of an ext4 file system, one may find a "lost+found" directory. -func AddVolumeMountSubPath(podSpec *v1.PodSpec, volumeMountName string) { - for i := range podSpec.InitContainers { - c := &podSpec.InitContainers[i] - addVolumeMountSubPathContainer(c, volumeMountName) - } - for i := range podSpec.Containers { - c := &podSpec.Containers[i] - addVolumeMountSubPathContainer(c, volumeMountName) - } -} - -// DaemonFlags returns the command line flags used by all Ceph daemons. 
-func DaemonFlags(cluster *client.ClusterInfo, spec *cephv1.ClusterSpec, daemonID string) []string { - flags := append( - config.DefaultFlags(cluster.FSID, keyring.VolumeMount().KeyringFilePath()), - config.NewFlag("id", daemonID), - // Ceph daemons in Rook will run as 'ceph' instead of 'root' - // If we run on a version of Ceph does not these flags it will simply ignore them - //run ceph daemon process under the 'ceph' user - config.NewFlag("setuser", "ceph"), - // run ceph daemon process under the 'ceph' group - config.NewFlag("setgroup", "ceph"), - ) - flags = append(flags, NetworkBindingFlags(cluster, spec)...) - - return flags -} - -// AdminFlags returns the command line flags used for Ceph commands requiring admin authentication. -func AdminFlags(cluster *client.ClusterInfo) []string { - return append( - config.DefaultFlags(cluster.FSID, keyring.VolumeMount().AdminKeyringFilePath()), - config.NewFlag("setuser", "ceph"), - config.NewFlag("setgroup", "ceph"), - ) -} - -func NetworkBindingFlags(cluster *client.ClusterInfo, spec *cephv1.ClusterSpec) []string { - var args []string - - // As of Pacific, Ceph supports dual-stack, so setting IPv6 family without disabling IPv4 binding actually enables dual-stack - // This is likely not user's intent, so on Pacific let's make sure to disable IPv4 when IPv6 is selected - if !spec.Network.DualStack { - switch spec.Network.IPFamily { - case cephv1.IPv4: - args = append(args, config.NewFlag("ms-bind-ipv4", "true")) - args = append(args, config.NewFlag("ms-bind-ipv6", "false")) - - case cephv1.IPv6: - args = append(args, config.NewFlag("ms-bind-ipv4", "false")) - args = append(args, config.NewFlag("ms-bind-ipv6", "true")) - } - } else { - if cluster.CephVersion.IsAtLeastPacific() { - args = append(args, config.NewFlag("ms-bind-ipv4", "true")) - args = append(args, config.NewFlag("ms-bind-ipv6", "true")) - } else { - logger.Info("dual-stack is only supported on ceph pacific") - // Still acknowledge IPv6, nothing to do for IPv4 since it will always be "on" - if spec.Network.IPFamily == cephv1.IPv6 { - args = append(args, config.NewFlag("ms-bind-ipv6", "true")) - } - } - } - - return args -} - -// ContainerEnvVarReference returns a reference to a Kubernetes container env var of the given name -// which can be used in command or argument fields. -func ContainerEnvVarReference(envVarName string) string { - return fmt.Sprintf("$(%s)", envVarName) -} - -// DaemonEnvVars returns the container environment variables used by all Ceph daemons. -func DaemonEnvVars(image string) []v1.EnvVar { - return append( - k8sutil.ClusterDaemonEnvVars(image), - config.StoredMonHostEnvVars()..., - ) -} - -// AppLabels returns labels common for all Rook-Ceph applications which may be useful for admins. -// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc. -func AppLabels(appName, namespace string) map[string]string { - return map[string]string{ - k8sutil.AppAttr: appName, - k8sutil.ClusterAttr: namespace, - } -} - -// CephDaemonAppLabels returns pod labels common to all Rook-Ceph pods which may be useful for admins. -// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc. 
-// Daemon type is the Ceph daemon type: "mon", "mgr", "osd", "mds", "rgw" -// Daemon ID is the ID portion of the Ceph daemon name: "a" for "mon.a"; "c" for "mds.c" -func CephDaemonAppLabels(appName, namespace, daemonType, daemonID string, includeNewLabels bool) map[string]string { - labels := AppLabels(appName, namespace) - - // New labels cannot be applied to match selectors during upgrade - if includeNewLabels { - labels[daemonTypeLabel] = daemonType - } - labels[DaemonIDLabel] = daemonID - // Also report the daemon id keyed by its daemon type: "mon: a", "mds: c", etc. - labels[daemonType] = daemonID - return labels -} - -// CheckPodMemory verify pod's memory limit is valid -func CheckPodMemory(name string, resources v1.ResourceRequirements, cephPodMinimumMemory uint64) error { - // Ceph related PR: https://github.com/ceph/ceph/pull/26856 - podMemoryLimit := resources.Limits.Memory() - podMemoryRequest := resources.Requests.Memory() - - // If nothing was provided let's just return - // This means no restrictions on pod's resources - if podMemoryLimit.IsZero() && podMemoryRequest.IsZero() { - return nil - } - - if !podMemoryLimit.IsZero() { - // This means LIMIT and REQUEST are either identical or different but still we use LIMIT as a reference - if uint64(podMemoryLimit.Value()) < display.MbTob(cephPodMinimumMemory) { - // allow the configuration if less than the min, but print a warning - logger.Warningf("running the %q daemon(s) with %dMB of ram, but at least %dMB is recommended", name, display.BToMb(uint64(podMemoryLimit.Value())), cephPodMinimumMemory) - } - - // This means LIMIT < REQUEST - // Kubernetes will refuse to schedule that pod however it's still valuable to indicate that user's input was incorrect - if uint64(podMemoryLimit.Value()) < uint64(podMemoryRequest.Value()) { - extraErrorLine := `\n - User has specified a pod memory limit %dmb below the pod memory request %dmb in the cluster CR.\n - Rook will create pods that are expected to fail to serve as a more apparent error indicator to the user.` - - return errors.Errorf(extraErrorLine, display.BToMb(uint64(podMemoryLimit.Value())), display.BToMb(uint64(podMemoryRequest.Value()))) - } - } - - return nil -} - -// ChownCephDataDirsInitContainer returns an init container which `chown`s the given data -// directories as the `ceph:ceph` user in the container. It also `chown`s the Ceph log dir in the -// container automatically. -// Doing a chown in a post start lifecycle hook does not reliably complete before the OSD -// process starts, which can cause the pod to fail without the lifecycle hook's chown command -// completing. It can take an arbitrarily long time for a pod restart to successfully chown the -// directory. This is a race condition for all daemons; therefore, do this in an init container. 
-// See more discussion here: https://github.com/rook/rook/pull/3594#discussion_r312279176 -func ChownCephDataDirsInitContainer( - dpm config.DataPathMap, - containerImage string, - volumeMounts []v1.VolumeMount, - resources v1.ResourceRequirements, - securityContext *v1.SecurityContext, -) v1.Container { - args := make([]string, 0, 5) - args = append(args, - "--verbose", - "--recursive", - "ceph:ceph", - config.VarLogCephDir, - config.VarLibCephCrashDir, - ) - if dpm.ContainerDataDir != "" { - args = append(args, dpm.ContainerDataDir) - } - return v1.Container{ - Name: "chown-container-data-dir", - Command: []string{"chown"}, - Args: args, - Image: containerImage, - VolumeMounts: volumeMounts, - Resources: resources, - SecurityContext: securityContext, - } -} - -// GenerateMinimalCephConfInitContainer returns an init container that will generate the most basic -// Ceph config for connecting non-Ceph daemons to a Ceph cluster (e.g., nfs-ganesha). Effectively -// what this means is that it generates '/etc/ceph/ceph.conf' with 'mon_host' populated and a -// keyring path associated with the user given. 'mon_host' is determined by the 'ROOK_CEPH_MON_HOST' -// env var present in other Ceph daemon pods, and the keyring is expected to be mounted into the -// container with a Kubernetes pod volume+mount. -func GenerateMinimalCephConfInitContainer( - username, keyringPath string, - containerImage string, - volumeMounts []v1.VolumeMount, - resources v1.ResourceRequirements, - securityContext *v1.SecurityContext, -) v1.Container { - cfgPath := client.DefaultConfigFilePath() - // Note that parameters like $(PARAM) will be replaced by Kubernetes with env var content before - // container creation. - confScript := ` -set -xEeuo pipefail - -cat << EOF > ` + cfgPath + ` -[global] -mon_host = $(ROOK_CEPH_MON_HOST) - -[` + username + `] -keyring = ` + keyringPath + ` -EOF - -chmod 444 ` + cfgPath + ` - -cat ` + cfgPath + ` -` - return v1.Container{ - Name: "generate-minimal-ceph-conf", - Command: []string{"/bin/bash", "-c", confScript}, - Args: []string{}, - Image: containerImage, - VolumeMounts: volumeMounts, - Env: config.StoredMonHostEnvVars(), - Resources: resources, - SecurityContext: securityContext, - } -} - -// StoredLogAndCrashVolume returns a pod volume sourced from the stored log and crashes files. -func StoredLogAndCrashVolume(hostLogDir, hostCrashDir string) []v1.Volume { - return []v1.Volume{ - { - Name: logVolumeName, - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{Path: hostLogDir}, - }, - }, - { - Name: crashVolumeName, - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{Path: hostCrashDir}, - }, - }, - } -} - -// StoredLogAndCrashVolumeMount returns a pod volume sourced from the stored log and crashes files. 
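The GenerateMinimalCephConfInitContainer helper above writes a minimal /etc/ceph/ceph.conf at container start. As a rough sketch, the rendered file for a hypothetical user looks like the output below; in the real init container the mon_host value is substituted by Kubernetes from the ROOK_CEPH_MON_HOST env var rather than passed as a Go parameter, and the user name and keyring path here are assumptions.

// Sketch of the minimal ceph.conf content generated by the init container above.
package main

import "fmt"

// minimalCephConf renders the same [global]/[client] layout the init script writes.
func minimalCephConf(username, keyringPath, monHost string) string {
	return fmt.Sprintf("[global]\nmon_host = %s\n\n[%s]\nkeyring = %s\n", monHost, username, keyringPath)
}

func main() {
	fmt.Print(minimalCephConf("client.rgw.a", "/etc/ceph/keyring-store/keyring", "10.96.0.10:6789"))
	// Expected output:
	// [global]
	// mon_host = 10.96.0.10:6789
	//
	// [client.rgw.a]
	// keyring = /etc/ceph/keyring-store/keyring
}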
-func StoredLogAndCrashVolumeMount(varLogCephDir, varLibCephCrashDir string) []v1.VolumeMount { - return []v1.VolumeMount{ - { - Name: logVolumeName, - ReadOnly: false, - MountPath: varLogCephDir, - }, - { - Name: crashVolumeName, - ReadOnly: false, - MountPath: varLibCephCrashDir, - }, - } -} - -// GenerateLivenessProbeExecDaemon makes sure a daemon has a socket and that it can be called and returns 0 -func GenerateLivenessProbeExecDaemon(daemonType, daemonID string) *v1.Probe { - confDaemon := getDaemonConfig(daemonType, daemonID) - initialDelaySeconds := initialDelaySecondsNonOSDDaemon - if daemonType == config.OsdType { - initialDelaySeconds = initialDelaySecondsOSDDaemon - } - - return &v1.Probe{ - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - // Run with env -i to clean env variables in the exec context - // This avoids conflict with the CEPH_ARGS env - // - // Example: - // env -i sh -c "ceph --admin-daemon /run/ceph/ceph-osd.0.asok status" - Command: []string{ - "env", - "-i", - "sh", - "-c", - fmt.Sprintf("ceph --admin-daemon %s %s", confDaemon.buildSocketPath(), confDaemon.buildAdminSocketCommand()), - }, - }, - }, - InitialDelaySeconds: initialDelaySeconds, - } -} - -func getDaemonConfig(daemonType, daemonID string) *daemonConfig { - return &daemonConfig{ - daemonType: string(daemonType), - daemonID: daemonID, - } -} - -func (c *daemonConfig) buildSocketName() string { - return fmt.Sprintf("ceph-%s.%s.asok", c.daemonType, c.daemonID) -} - -func (c *daemonConfig) buildSocketPath() string { - return path.Join(daemonSocketDir, c.buildSocketName()) -} - -func (c *daemonConfig) buildAdminSocketCommand() string { - command := "status" - if c.daemonType == config.MonType { - command = "mon_status" - } - - return command -} - -// PodSecurityContext detects if the pod needs privileges to run -func PodSecurityContext() *v1.SecurityContext { - privileged := false - if os.Getenv("ROOK_HOSTPATH_REQUIRES_PRIVILEGED") == "true" { - privileged = true - } - - return &v1.SecurityContext{ - Privileged: &privileged, - } -} - -// LogCollectorContainer runs a cron job to rotate logs -func LogCollectorContainer(daemonID, ns string, c cephv1.ClusterSpec) *v1.Container { - return &v1.Container{ - Name: logCollector, - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(cronLogRotate, daemonID, c.LogCollector.Periodicity), - }, - Image: c.CephVersion.Image, - VolumeMounts: DaemonVolumeMounts(config.NewDatalessDaemonDataPathMap(ns, c.DataDirHostPath), ""), - SecurityContext: PodSecurityContext(), - Resources: cephv1.GetLogCollectorResources(c.Resources), - } -} - -// CreateExternalMetricsEndpoints creates external metric endpoint -func createExternalMetricsEndpoints(namespace string, monitoringSpec cephv1.MonitoringSpec, ownerInfo *k8sutil.OwnerInfo) (*v1.Endpoints, error) { - labels := AppLabels("rook-ceph-mgr", namespace) - - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: ExternalMgrAppName, - Namespace: namespace, - Labels: labels, - }, - Subsets: []v1.EndpointSubset{ - { - Addresses: monitoringSpec.ExternalMgrEndpoints, - Ports: []v1.EndpointPort{ - { - Name: ServiceExternalMetricName, - Port: int32(monitoringSpec.ExternalMgrPrometheusPort), - Protocol: v1.ProtocolTCP, - }, - }, - }, - }, - } - - err := ownerInfo.SetControllerReference(endpoints) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to metric endpoints %q", endpoints.Name) - } - - return endpoints, nil -} - -func ConfigureExternalMetricsEndpoint(ctx *clusterd.Context, 
monitoringSpec cephv1.MonitoringSpec, clusterInfo *client.ClusterInfo, ownerInfo *k8sutil.OwnerInfo) error { - if len(monitoringSpec.ExternalMgrEndpoints) == 0 { - logger.Debug("no metric endpoint configured, doing nothing") - return nil - } - - // Get active mgr - var activeMgrAddr string - // We use mgr dump and not stat because we want the IP address - mgrMap, err := client.CephMgrMap(ctx, clusterInfo) - if err != nil { - logger.Errorf("failed to get mgr map. %v", err) - } else { - activeMgrAddr = extractMgrIP(mgrMap.ActiveAddr) - } - logger.Debugf("active mgr addr %q", activeMgrAddr) - - // If the active manager is different than the one in the spec we override it - // This happens when a standby manager becomes active - if activeMgrAddr != monitoringSpec.ExternalMgrEndpoints[0].IP { - monitoringSpec.ExternalMgrEndpoints[0].IP = activeMgrAddr - } - - // Create external monitoring Endpoints - endpoint, err := createExternalMetricsEndpoints(clusterInfo.Namespace, monitoringSpec, ownerInfo) - if err != nil { - return err - } - - // Get the endpoint to see if anything needs to be updated - currentEndpoints, err := ctx.Clientset.CoreV1().Endpoints(clusterInfo.Namespace).Get(context.TODO(), endpoint.Name, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return errors.Wrap(err, "failed to fetch endpoints") - } - - // If endpoints are identical there is nothing to do - if reflect.DeepEqual(currentEndpoints, endpoint) { - return nil - } - logger.Debugf("diff between current endpoint and newly generated one: %v \n", cmp.Diff(currentEndpoints, endpoint, cmp.Comparer(func(x, y resource.Quantity) bool { return x.Cmp(y) == 0 }))) - - _, err = k8sutil.CreateOrUpdateEndpoint(ctx.Clientset, clusterInfo.Namespace, endpoint) - if err != nil { - return errors.Wrap(err, "failed to create or update mgr endpoint") - } - - return nil -} - -func extractMgrIP(rawActiveAddr string) string { - return strings.Split(rawActiveAddr, ":")[0] -} diff --git a/pkg/operator/ceph/controller/spec_test.go b/pkg/operator/ceph/controller/spec_test.go deleted file mode 100644 index 7836b02ec..000000000 --- a/pkg/operator/ceph/controller/spec_test.go +++ /dev/null @@ -1,394 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "math" - "reflect" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestPodVolumes(t *testing.T) { - dataPathMap := config.NewDatalessDaemonDataPathMap("rook-ceph", "/var/lib/rook") - - if err := test.VolumeIsEmptyDir(k8sutil.DataDirVolume, PodVolumes(dataPathMap, "", false)); err != nil { - t.Errorf("PodVolumes(\"\") - data dir source is not EmptyDir: %s", err.Error()) - } - if err := test.VolumeIsHostPath(k8sutil.DataDirVolume, "/dev/sdb", PodVolumes(dataPathMap, "/dev/sdb", false)); err != nil { - t.Errorf("PodVolumes(\"/dev/sdb\") - data dir source is not HostPath: %s", err.Error()) - } -} - -func TestMountsMatchVolumes(t *testing.T) { - - dataPathMap := config.NewDatalessDaemonDataPathMap("rook-ceph", "/var/lib/rook") - - volsMountsTestDef := test.VolumesAndMountsTestDefinition{ - VolumesSpec: &test.VolumesSpec{ - Moniker: "PodVolumes(\"/dev/sdc\")", Volumes: PodVolumes(dataPathMap, "/dev/sdc", false)}, - MountsSpecItems: []*test.MountsSpec{ - {Moniker: "CephVolumeMounts(true)", Mounts: CephVolumeMounts(dataPathMap, false)}, - {Moniker: "RookVolumeMounts(true)", Mounts: RookVolumeMounts(dataPathMap, false)}}, - } - volsMountsTestDef.TestMountsMatchVolumes(t) - - volsMountsTestDef = test.VolumesAndMountsTestDefinition{ - VolumesSpec: &test.VolumesSpec{ - Moniker: "PodVolumes(\"/dev/sdc\")", Volumes: PodVolumes(dataPathMap, "/dev/sdc", true)}, - MountsSpecItems: []*test.MountsSpec{ - {Moniker: "CephVolumeMounts(false)", Mounts: CephVolumeMounts(dataPathMap, true)}, - {Moniker: "RookVolumeMounts(false)", Mounts: RookVolumeMounts(dataPathMap, true)}}, - } - volsMountsTestDef.TestMountsMatchVolumes(t) -} - -func TestCheckPodMemory(t *testing.T) { - // This value is in MB - const PodMinimumMemory uint64 = 1024 - name := "test" - - // A value for the memory used in the tests - var memory_value = int64(PodMinimumMemory * 8 * uint64(math.Pow10(6))) - - // Case 1: No memory limits, no memory requested - test_resource := v1.ResourceRequirements{} - - if err := CheckPodMemory(name, test_resource, PodMinimumMemory); err != nil { - t.Errorf("Error case 1: %s", err.Error()) - } - - // Case 2: memory limit and memory requested - test_resource = v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(memory_value, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(memory_value, resource.BinarySI), - }, - } - - if err := CheckPodMemory(name, test_resource, PodMinimumMemory); err != nil { - t.Errorf("Error case 2: %s", err.Error()) - } - - // Only memory requested - test_resource = v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(memory_value, resource.BinarySI), - }, - } - - if err := CheckPodMemory(name, test_resource, PodMinimumMemory); err != nil { - 
t.Errorf("Error case 3: %s", err.Error()) - } -} - -func TestBuildAdminSocketCommand(t *testing.T) { - c := getDaemonConfig(config.OsdType, "") - - command := c.buildAdminSocketCommand() - assert.Equal(t, "status", command) - - c.daemonType = config.MonType - command = c.buildAdminSocketCommand() - assert.Equal(t, "mon_status", command) -} - -func TestBuildSocketName(t *testing.T) { - daemonID := "0" - c := getDaemonConfig(config.OsdType, daemonID) - - socketName := c.buildSocketName() - assert.Equal(t, "ceph-osd.0.asok", socketName) - - c.daemonType = config.MonType - c.daemonID = "a" - socketName = c.buildSocketName() - assert.Equal(t, "ceph-mon.a.asok", socketName) -} - -func TestBuildSocketPath(t *testing.T) { - daemonID := "0" - c := getDaemonConfig(config.OsdType, daemonID) - - socketPath := c.buildSocketPath() - assert.Equal(t, "/run/ceph/ceph-osd.0.asok", socketPath) -} - -func TestGenerateLivenessProbeExecDaemon(t *testing.T) { - daemonID := "0" - probe := GenerateLivenessProbeExecDaemon(config.OsdType, daemonID) - expectedCommand := []string{"env", - "-i", - "sh", - "-c", - "ceph --admin-daemon /run/ceph/ceph-osd.0.asok status", - } - - assert.Equal(t, expectedCommand, probe.Handler.Exec.Command) - // it's an OSD the delay must be 45 - assert.Equal(t, initialDelaySecondsOSDDaemon, probe.InitialDelaySeconds) - - // test with a mon so the delay should be 10 - probe = GenerateLivenessProbeExecDaemon(config.MonType, "a") - assert.Equal(t, initialDelaySecondsNonOSDDaemon, probe.InitialDelaySeconds) -} - -func TestDaemonFlags(t *testing.T) { - testcases := []struct { - label string - clusterInfo *cephclient.ClusterInfo - clusterSpec *cephv1.ClusterSpec - daemonID string - expected []string - }{ - { - label: "case 1: IPv6 enabled", - clusterInfo: &cephclient.ClusterInfo{ - FSID: "id", - }, - clusterSpec: &cephv1.ClusterSpec{ - Network: cephv1.NetworkSpec{ - IPFamily: "IPv6", - }, - }, - daemonID: "daemon-id", - expected: []string{"--fsid=id", "--keyring=/etc/ceph/keyring-store/keyring", "--log-to-stderr=true", "--err-to-stderr=true", - "--mon-cluster-log-to-stderr=true", "--log-stderr-prefix=debug ", "--default-log-to-file=false", "--default-mon-cluster-log-to-file=false", - "--mon-host=$(ROOK_CEPH_MON_HOST)", "--mon-initial-members=$(ROOK_CEPH_MON_INITIAL_MEMBERS)", "--id=daemon-id", "--setuser=ceph", "--setgroup=ceph", - "--ms-bind-ipv4=false", "--ms-bind-ipv6=true"}, - }, - { - label: "case 2: IPv6 disabled", - clusterInfo: &cephclient.ClusterInfo{ - FSID: "id", - }, - clusterSpec: &cephv1.ClusterSpec{}, - daemonID: "daemon-id", - expected: []string{"--fsid=id", "--keyring=/etc/ceph/keyring-store/keyring", "--log-to-stderr=true", "--err-to-stderr=true", - "--mon-cluster-log-to-stderr=true", "--log-stderr-prefix=debug ", "--default-log-to-file=false", "--default-mon-cluster-log-to-file=false", - "--mon-host=$(ROOK_CEPH_MON_HOST)", "--mon-initial-members=$(ROOK_CEPH_MON_INITIAL_MEMBERS)", "--id=daemon-id", "--setuser=ceph", "--setgroup=ceph"}, - }, - } - - for _, tc := range testcases { - actual := DaemonFlags(tc.clusterInfo, tc.clusterSpec, tc.daemonID) - assert.Equalf(t, tc.expected, actual, "[%s]: failed to get correct daemonset flags", tc.label) - } -} - -func TestNetworkBindingFlags(t *testing.T) { - ipv4FlagTrue := "--ms-bind-ipv4=true" - ipv4FlagFalse := "--ms-bind-ipv4=false" - ipv6FlagTrue := "--ms-bind-ipv6=true" - ipv6FlagFalse := "--ms-bind-ipv6=false" - type args struct { - cluster *client.ClusterInfo - spec *cephv1.ClusterSpec - } - tests := []struct { - name string - 
args args - want []string - }{ - {"octopus-ipv4", args{cluster: &client.ClusterInfo{CephVersion: version.Octopus}, spec: &cephv1.ClusterSpec{Network: cephv1.NetworkSpec{IPFamily: cephv1.IPv4}}}, []string{ipv4FlagTrue, ipv6FlagFalse}}, - {"octopus-ipv6", args{cluster: &client.ClusterInfo{CephVersion: version.Octopus}, spec: &cephv1.ClusterSpec{Network: cephv1.NetworkSpec{IPFamily: cephv1.IPv6}}}, []string{ipv4FlagFalse, ipv6FlagTrue}}, - {"octopus-dualstack-unsupported", args{cluster: &client.ClusterInfo{CephVersion: version.Octopus}, spec: &cephv1.ClusterSpec{Network: cephv1.NetworkSpec{IPFamily: cephv1.IPv4, DualStack: true}}}, []string{}}, - {"octopus-dualstack-unsupported-by-ipv6", args{cluster: &client.ClusterInfo{CephVersion: version.Octopus}, spec: &cephv1.ClusterSpec{Network: cephv1.NetworkSpec{IPFamily: cephv1.IPv6, DualStack: true}}}, []string{ipv6FlagTrue}}, - {"pacific-ipv4", args{cluster: &client.ClusterInfo{CephVersion: version.Pacific}, spec: &cephv1.ClusterSpec{Network: cephv1.NetworkSpec{IPFamily: cephv1.IPv4}}}, []string{ipv4FlagTrue, ipv6FlagFalse}}, - {"pacific-ipv6", args{cluster: &client.ClusterInfo{CephVersion: version.Pacific}, spec: &cephv1.ClusterSpec{Network: cephv1.NetworkSpec{IPFamily: cephv1.IPv6}}}, []string{ipv4FlagFalse, ipv6FlagTrue}}, - {"pacific-dualstack-supported", args{cluster: &client.ClusterInfo{CephVersion: version.Pacific}, spec: &cephv1.ClusterSpec{Network: cephv1.NetworkSpec{IPFamily: cephv1.IPv6, DualStack: true}}}, []string{ipv4FlagTrue, ipv6FlagTrue}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := NetworkBindingFlags(tt.args.cluster, tt.args.spec); !reflect.DeepEqual(got, tt.want) { - if len(got) != 0 && len(tt.want) != 0 { - t.Errorf("NetworkBindingFlags() = %+v, want %+v", got, tt.want) - } - } - }) - } -} - -func TestExtractMgrIP(t *testing.T) { - activeMgrRaw := "172.17.0.12:6801/2535462469" - ip := extractMgrIP(activeMgrRaw) - assert.Equal(t, "172.17.0.12", ip) -} - -func TestConfigureExternalMetricsEndpoint(t *testing.T) { - t.Run("spec and current active mgr endpoint identical with no existing endpoint object", func(t *testing.T) { - monitoringSpec := cephv1.MonitoringSpec{ - Enabled: true, - RulesNamespace: "rook-ceph", - ExternalMgrEndpoints: []v1.EndpointAddress{{IP: "192.168.0.1"}}, - } - clusterInfo := &cephclient.ClusterInfo{ - FSID: "id", - Namespace: "rook-ceph", - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "dump" { - return fmt.Sprintf(`{"active_addr":"%s"}`, "192.168.0.1:6801/2535462469"), nil - } - return "", errors.New("unknown command") - }, - } - - ctx := &clusterd.Context{ - Clientset: test.New(t, 3), - RookClientset: rookclient.NewSimpleClientset(), - Executor: executor, - } - - err := ConfigureExternalMetricsEndpoint(ctx, monitoringSpec, clusterInfo, cephclient.NewMinimumOwnerInfo(t)) - assert.NoError(t, err) - - currentEndpoints, err := ctx.Clientset.CoreV1().Endpoints(namespace).Get(context.TODO(), "rook-ceph-mgr-external", metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "192.168.0.1", currentEndpoints.Subsets[0].Addresses[0].IP, currentEndpoints) - }) - - t.Run("spec and current active mgr endpoint different with no existing endpoint object", func(t *testing.T) { - monitoringSpec := cephv1.MonitoringSpec{ - Enabled: true, - RulesNamespace: "rook-ceph", - ExternalMgrEndpoints: []v1.EndpointAddress{{IP: "192.168.0.1"}}, - 
} - clusterInfo := &cephclient.ClusterInfo{ - FSID: "id", - Namespace: "rook-ceph", - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "dump" { - return fmt.Sprintf(`{"active_addr":"%s"}`, "172.17.0.12:6801/2535462469"), nil - } - return "", errors.New("unknown command") - }, - } - ctx := &clusterd.Context{ - Clientset: test.New(t, 3), - RookClientset: rookclient.NewSimpleClientset(), - Executor: executor, - } - - err := ConfigureExternalMetricsEndpoint(ctx, monitoringSpec, clusterInfo, cephclient.NewMinimumOwnerInfo(t)) - assert.NoError(t, err) - - currentEndpoints, err := ctx.Clientset.CoreV1().Endpoints(namespace).Get(context.TODO(), "rook-ceph-mgr-external", metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "172.17.0.12", currentEndpoints.Subsets[0].Addresses[0].IP, currentEndpoints) - }) - - t.Run("spec and current active mgr endpoint different with existing endpoint object", func(t *testing.T) { - monitoringSpec := cephv1.MonitoringSpec{ - Enabled: true, - RulesNamespace: "rook-ceph", - ExternalMgrEndpoints: []v1.EndpointAddress{{IP: "192.168.0.1"}}, - } - clusterInfo := &cephclient.ClusterInfo{ - FSID: "id", - Namespace: "rook-ceph", - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "dump" { - return fmt.Sprintf(`{"active_addr":"%s"}`, "172.17.0.12:6801/2535462469"), nil - } - return "", errors.New("unknown command") - }, - } - ctx := &clusterd.Context{ - Clientset: test.New(t, 3), - RookClientset: rookclient.NewSimpleClientset(), - Executor: executor, - } - ownerInfo := cephclient.NewMinimumOwnerInfo(t) - ep, err := createExternalMetricsEndpoints(clusterInfo.Namespace, monitoringSpec, ownerInfo) - assert.NoError(t, err) - _, err = ctx.Clientset.CoreV1().Endpoints(namespace).Create(context.TODO(), ep, metav1.CreateOptions{}) - assert.NoError(t, err) - - err = ConfigureExternalMetricsEndpoint(ctx, monitoringSpec, clusterInfo, ownerInfo) - assert.NoError(t, err) - - currentEndpoints, err := ctx.Clientset.CoreV1().Endpoints(namespace).Get(context.TODO(), "rook-ceph-mgr-external", metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "172.17.0.12", currentEndpoints.Subsets[0].Addresses[0].IP, currentEndpoints) - }) - - t.Run("spec and current active mgr endpoint identical with existing endpoint object", func(t *testing.T) { - monitoringSpec := cephv1.MonitoringSpec{ - Enabled: true, - RulesNamespace: "rook-ceph", - ExternalMgrEndpoints: []v1.EndpointAddress{{IP: "192.168.0.1"}}, - } - clusterInfo := &cephclient.ClusterInfo{ - FSID: "id", - Namespace: "rook-ceph", - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "dump" { - return fmt.Sprintf(`{"active_addr":"%s"}`, "192.168.0.1:6801/2535462469"), nil - } - return "", errors.New("unknown command") - }, - } - ctx := &clusterd.Context{ - Clientset: test.New(t, 3), - RookClientset: rookclient.NewSimpleClientset(), - Executor: executor, - } - ownerInfo := cephclient.NewMinimumOwnerInfo(t) - ep, err := createExternalMetricsEndpoints(clusterInfo.Namespace, monitoringSpec, ownerInfo) - assert.NoError(t, err) - _, err = ctx.Clientset.CoreV1().Endpoints(namespace).Create(context.TODO(), ep, 
metav1.CreateOptions{}) - assert.NoError(t, err) - - err = ConfigureExternalMetricsEndpoint(ctx, monitoringSpec, clusterInfo, ownerInfo) - assert.NoError(t, err) - - currentEndpoints, err := ctx.Clientset.CoreV1().Endpoints(namespace).Get(context.TODO(), "rook-ceph-mgr-external", metav1.GetOptions{}) - assert.NoError(t, err) - assert.Equal(t, "192.168.0.1", currentEndpoints.Subsets[0].Addresses[0].IP, currentEndpoints) - }) -} diff --git a/pkg/operator/ceph/controller/version.go b/pkg/operator/ceph/controller/version.go deleted file mode 100644 index 82d4565c7..000000000 --- a/pkg/operator/ceph/controller/version.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" -) - -// ValidateCephVersionsBetweenLocalAndExternalClusters makes sure an external cluster can be connected -// by checking the external ceph versions available and comparing it with the local image provided -func ValidateCephVersionsBetweenLocalAndExternalClusters(context *clusterd.Context, clusterInfo *client.ClusterInfo) (cephver.CephVersion, error) { - // health check should tell us if the external cluster has been upgraded and display a message - externalVersion, err := client.GetCephMonVersion(context, clusterInfo) - if err != nil { - return cephver.CephVersion{}, errors.Wrap(err, "failed to get ceph mon version") - } - - return *externalVersion, cephver.ValidateCephVersionsBetweenLocalAndExternalClusters(clusterInfo.CephVersion, *externalVersion) -} - -// GetImageVersion returns the CephVersion registered for a specified image (if any) and whether any image was found. -func GetImageVersion(cephCluster cephv1.CephCluster) (*cephver.CephVersion, error) { - // If the Ceph cluster has not yet recorded the image and version for the current image in its spec, then the Crash - // controller should wait for the version to be detected. - if cephCluster.Status.CephVersion != nil && cephCluster.Spec.CephVersion.Image == cephCluster.Status.CephVersion.Image { - logger.Debugf("ceph version found %q", cephCluster.Status.CephVersion.Version) - return ExtractCephVersionFromLabel(cephCluster.Status.CephVersion.Version) - } - - return nil, errors.New("attempt to determine ceph version for the current cluster image timed out") -} diff --git a/pkg/operator/ceph/cr_manager.go b/pkg/operator/ceph/cr_manager.go deleted file mode 100644 index d18325b3c..000000000 --- a/pkg/operator/ceph/cr_manager.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operator - -import ( - "context" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/ceph/cluster" - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -func (o *Operator) startManager(namespaceToWatch string, context context.Context, mgrErrorCh chan error) { - // Set up a manager - mgrOpts := manager.Options{ - LeaderElection: false, - Namespace: namespaceToWatch, - } - - logger.Info("setting up the controller-runtime manager") - kubeConfig, err := config.GetConfig() - if err != nil { - mgrErrorCh <- errors.Wrap(err, "failed to get client config for controller-runtime manager") - return - } - - mgr, err := manager.New(kubeConfig, mgrOpts) - if err != nil { - mgrErrorCh <- errors.Wrap(err, "failed to set up overall controller-runtime manager") - return - } - - // options to pass to the controllers - controllerOpts := &controllerconfig.Context{ - RookImage: o.rookImage, - ClusterdContext: o.context, - OperatorNamespace: o.operatorNamespace, - ReconcileCanaries: &controllerconfig.LockingBool{}, - } - // Add the registered controllers to the manager (entrypoint for controllers) - err = cluster.AddToManager(mgr, controllerOpts, o.clusterController) - if err != nil { - mgrErrorCh <- errors.Wrap(err, "failed to add controllers to controller-runtime manager") - return - } - - logger.Info("starting the controller-runtime manager") - if err := mgr.Start(context); err != nil { - mgrErrorCh <- errors.Wrap(err, "failed to run the controller-runtime manager") - return - } -} diff --git a/pkg/operator/ceph/csi/betav1csidriver.go b/pkg/operator/ceph/csi/betav1csidriver.go deleted file mode 100644 index 4d98941ab..000000000 --- a/pkg/operator/ceph/csi/betav1csidriver.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -TODO: storage.k8s.io/v1beta1 CSIDriver is deprecated in Kubernetes v1.19+, unavailable in v1.22+; -Once the support of older Kubernetes releases are removed in Rook, delete the file to -remove the support for the betav1 CSIDriver object. 
-*/ - -package csi - -import ( - "context" - - "github.com/pkg/errors" - betav1k8scsi "k8s.io/api/storage/v1beta1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/typed/storage/v1beta1" -) - -type beta1CsiDriver struct { - csiDriver *betav1k8scsi.CSIDriver - csiClient v1beta1.CSIDriverInterface -} - -// createCSIDriverInfo Registers CSI driver by creating a CSIDriver object -func (d beta1CsiDriver) createCSIDriverInfo(ctx context.Context, clientset kubernetes.Interface, name, fsGroupPolicy string) error { - attach := true - mountInfo := false - // Create CSIDriver object - csiDriver := &betav1k8scsi.CSIDriver{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: betav1k8scsi.CSIDriverSpec{ - AttachRequired: &attach, - PodInfoOnMount: &mountInfo, - }, - } - if fsGroupPolicy != "" { - policy := betav1k8scsi.FSGroupPolicy(fsGroupPolicy) - csiDriver.Spec.FSGroupPolicy = &policy - } - csidrivers := clientset.StorageV1beta1().CSIDrivers() - driver, err := csidrivers.Get(ctx, name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - _, err = csidrivers.Create(ctx, csiDriver, metav1.CreateOptions{}) - if err != nil { - return err - } - logger.Infof("CSIDriver object created for driver %q", name) - } - return err - } - - // As FSGroupPolicy field is immutable, should be set only during create time. - // if the request is to change the FSGroupPolicy, we are deleting the CSIDriver object and creating it. - if driver.Spec.FSGroupPolicy != nil && csiDriver.Spec.FSGroupPolicy != nil && *driver.Spec.FSGroupPolicy != *csiDriver.Spec.FSGroupPolicy { - d.csiClient = csidrivers - d.csiDriver = csiDriver - return d.reCreateCSIDriverInfo(ctx) - } - - // For csidriver we need to provide the resourceVersion when updating the object. 
- // From the docs (https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata) - // > "This value MUST be treated as opaque by clients and passed unmodified back to the server" - csiDriver.ObjectMeta.ResourceVersion = driver.ObjectMeta.ResourceVersion - _, err = csidrivers.Update(ctx, csiDriver, metav1.UpdateOptions{}) - if err != nil { - return err - } - logger.Infof("CSIDriver object updated for driver %q", name) - return nil -} - -func (d beta1CsiDriver) reCreateCSIDriverInfo(ctx context.Context) error { - err := d.csiClient.Delete(ctx, d.csiDriver.Name, metav1.DeleteOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to delete CSIDriver object for driver %q", d.csiDriver.Name) - } - logger.Infof("CSIDriver object deleted for driver %q", d.csiDriver.Name) - _, err = d.csiClient.Create(ctx, d.csiDriver, metav1.CreateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to recreate CSIDriver object for driver %q", d.csiDriver.Name) - } - logger.Infof("CSIDriver object recreated for driver %q", d.csiDriver.Name) - return nil -} - -// deleteCSIDriverInfo deletes CSIDriverInfo and returns the error if any -func (d beta1CsiDriver) deleteCSIDriverInfo(ctx context.Context, clientset kubernetes.Interface, name string) error { - err := clientset.StorageV1beta1().CSIDrivers().Delete(ctx, name, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - logger.Debug("%q CSIDriver not found; skipping deletion.", name) - return nil - } - } - return err -} diff --git a/pkg/operator/ceph/csi/cluster_config.go b/pkg/operator/ceph/csi/cluster_config.go deleted file mode 100644 index 083f309b2..000000000 --- a/pkg/operator/ceph/csi/cluster_config.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package csi - -import ( - "context" - "encoding/json" - "os" - "sync" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "ceph-csi") -) - -type csiClusterConfigEntry struct { - ClusterID string `json:"clusterID"` - Monitors []string `json:"monitors"` -} - -type csiClusterConfig []csiClusterConfigEntry - -// FormatCsiClusterConfig returns a json-formatted string containing -// the cluster-to-mon mapping required to configure ceph csi. 
-func FormatCsiClusterConfig( - clusterKey string, mons map[string]*cephclient.MonInfo) (string, error) { - - cc := make(csiClusterConfig, 1) - cc[0].ClusterID = clusterKey - cc[0].Monitors = []string{} - for _, m := range mons { - cc[0].Monitors = append(cc[0].Monitors, m.Endpoint) - } - - ccJson, err := json.Marshal(cc) - if err != nil { - return "", errors.Wrap(err, "failed to marshal csi cluster config") - } - return string(ccJson), nil -} - -func parseCsiClusterConfig(c string) (csiClusterConfig, error) { - var cc csiClusterConfig - err := json.Unmarshal([]byte(c), &cc) - if err != nil { - return cc, errors.Wrap(err, "failed to parse csi cluster config") - } - return cc, nil -} - -func formatCsiClusterConfig(cc csiClusterConfig) (string, error) { - ccJson, err := json.Marshal(cc) - if err != nil { - return "", errors.Wrap(err, "failed to marshal csi cluster config") - } - return string(ccJson), nil -} - -func monEndpoints(mons map[string]*cephclient.MonInfo) []string { - endpoints := make([]string, 0) - for _, m := range mons { - endpoints = append(endpoints, m.Endpoint) - } - return endpoints -} - -// UpdateCsiClusterConfig returns a json-formatted string containing -// the cluster-to-mon mapping required to configure ceph csi. -func UpdateCsiClusterConfig( - curr, clusterKey string, mons map[string]*cephclient.MonInfo) (string, error) { - - var ( - cc csiClusterConfig - centry csiClusterConfigEntry - found bool - ) - cc, err := parseCsiClusterConfig(curr) - if err != nil { - return "", errors.Wrap(err, "failed to parse current csi cluster config") - } - - for i, centry := range cc { - if centry.ClusterID == clusterKey { - centry.Monitors = monEndpoints(mons) - found = true - cc[i] = centry - break - } - } - if !found { - centry.ClusterID = clusterKey - centry.Monitors = monEndpoints(mons) - cc = append(cc, centry) - } - return formatCsiClusterConfig(cc) -} - -// CreateCsiConfigMap creates an empty config map that will be later used -// to provide cluster configuration to ceph-csi. If a config map already -// exists, it will return it. -func CreateCsiConfigMap(namespace string, clientset kubernetes.Interface, ownerInfo *k8sutil.OwnerInfo) error { - ctx := context.TODO() - configMap := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: ConfigName, - Namespace: namespace, - }, - } - configMap.Data = map[string]string{ - ConfigKey: "[]", - } - - err := ownerInfo.SetControllerReference(configMap) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to csi configmap %q", configMap.Name) - } - _, err = clientset.CoreV1().ConfigMaps(namespace).Create(ctx, configMap, metav1.CreateOptions{}) - if err != nil { - if !k8serrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create initial csi config map %q (in %q)", configMap.Name, namespace) - } - } - - logger.Infof("successfully created csi config map %q", configMap.Name) - return nil -} - -// SaveClusterConfig updates the config map used to provide ceph-csi with -// basic cluster configuration. The clusterNamespace and clusterInfo are -// used to determine what "cluster" in the config map will be updated and -// and the clusterNamespace value is expected to match the clusterID -// value that is provided to ceph-csi uses in the storage class. -// The locker l is typically a mutex and is used to prevent the config -// map from being updated for multiple clusters simultaneously. 
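The config-map helpers above maintain a JSON array of per-cluster entries for ceph-csi. A minimal standalone sketch of that document shape follows; the cluster ID and monitor endpoints are assumed values for illustration.

// Sketch of the cluster-to-mon JSON produced by FormatCsiClusterConfig/UpdateCsiClusterConfig.
package main

import (
	"encoding/json"
	"fmt"
)

// entry mirrors the csiClusterConfigEntry fields shown above.
type entry struct {
	ClusterID string   `json:"clusterID"`
	Monitors  []string `json:"monitors"`
}

func main() {
	cfg := []entry{{ClusterID: "rook-ceph", Monitors: []string{"10.96.0.10:6789", "10.96.0.11:6789"}}}
	out, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Prints: [{"clusterID":"rook-ceph","monitors":["10.96.0.10:6789","10.96.0.11:6789"]}]
}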
-func SaveClusterConfig( - clientset kubernetes.Interface, clusterNamespace string, - clusterInfo *cephclient.ClusterInfo, l sync.Locker) error { - ctx := context.TODO() - - if !CSIEnabled() { - return nil - } - l.Lock() - defer l.Unlock() - // csi is deployed into the same namespace as the operator - csiNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - if csiNamespace == "" { - return errors.Errorf("namespace value missing for %s", k8sutil.PodNamespaceEnvVar) - } - logger.Debugf("Using %+v for CSI ConfigMap Namespace", csiNamespace) - - // fetch current ConfigMap contents - configMap, err := clientset.CoreV1().ConfigMaps(csiNamespace).Get(ctx, - ConfigName, metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "failed to fetch current csi config map") - } - - // update ConfigMap contents for current cluster - currData := configMap.Data[ConfigKey] - if currData == "" { - currData = "[]" - } - newData, err := UpdateCsiClusterConfig( - currData, clusterNamespace, clusterInfo.Monitors) - if err != nil { - return errors.Wrap(err, "failed to update csi config map data") - } - configMap.Data[ConfigKey] = newData - - // update ConfigMap with new contents - if _, err := clientset.CoreV1().ConfigMaps(csiNamespace).Update(ctx, configMap, metav1.UpdateOptions{}); err != nil { - return errors.Wrapf(err, "failed to update csi config map") - } - - return nil -} diff --git a/pkg/operator/ceph/csi/cluster_config_test.go b/pkg/operator/ceph/csi/cluster_config_test.go deleted file mode 100644 index 2def6e45b..000000000 --- a/pkg/operator/ceph/csi/cluster_config_test.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csi - -import ( - "testing" - - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/stretchr/testify/assert" -) - -func TestUpdateCsiClusterConfig(t *testing.T) { - // initialize an empty list & add a simple mons list - mons := map[string]*cephclient.MonInfo{ - "foo": {Name: "foo", Endpoint: "1.2.3.4:5000"}, - } - s, err := UpdateCsiClusterConfig("[]", "alpha", mons) - assert.NoError(t, err) - assert.Equal(t, s, - `[{"clusterID":"alpha","monitors":["1.2.3.4:5000"]}]`) - - // add a 2nd mon to the current cluster - mons["bar"] = &cephclient.MonInfo{ - Name: "bar", Endpoint: "10.11.12.13:5000"} - s, err = UpdateCsiClusterConfig(s, "alpha", mons) - assert.NoError(t, err) - cc, err := parseCsiClusterConfig(s) - assert.NoError(t, err) - assert.Equal(t, len(cc), 1) - assert.Equal(t, cc[0].ClusterID, "alpha") - assert.Contains(t, cc[0].Monitors, "1.2.3.4:5000") - assert.Contains(t, cc[0].Monitors, "10.11.12.13:5000") - assert.Equal(t, len(cc[0].Monitors), 2) - - // add a 2nd cluster with 3 mons - mons2 := map[string]*cephclient.MonInfo{ - "flim": {Name: "flim", Endpoint: "20.1.1.1:5000"}, - "flam": {Name: "flam", Endpoint: "20.1.1.2:5000"}, - "blam": {Name: "blam", Endpoint: "20.1.1.3:5000"}, - } - s, err = UpdateCsiClusterConfig(s, "beta", mons2) - assert.NoError(t, err) - cc, err = parseCsiClusterConfig(s) - assert.NoError(t, err) - assert.Equal(t, len(cc), 2) - assert.Equal(t, cc[0].ClusterID, "alpha") - assert.Contains(t, cc[0].Monitors, "1.2.3.4:5000") - assert.Contains(t, cc[0].Monitors, "10.11.12.13:5000") - assert.Equal(t, len(cc[0].Monitors), 2) - assert.Equal(t, cc[1].ClusterID, "beta") - assert.Contains(t, cc[1].Monitors, "20.1.1.1:5000") - assert.Contains(t, cc[1].Monitors, "20.1.1.2:5000") - assert.Contains(t, cc[1].Monitors, "20.1.1.3:5000") - assert.Equal(t, len(cc[1].Monitors), 3) - - // remove a mon from the 2nd cluster - delete(mons2, "blam") - s, err = UpdateCsiClusterConfig(s, "beta", mons2) - assert.NoError(t, err) - cc, err = parseCsiClusterConfig(s) - assert.NoError(t, err) - assert.Equal(t, len(cc), 2) - assert.Equal(t, cc[0].ClusterID, "alpha") - assert.Contains(t, cc[0].Monitors, "1.2.3.4:5000") - assert.Contains(t, cc[0].Monitors, "10.11.12.13:5000") - assert.Equal(t, len(cc[0].Monitors), 2) - assert.Equal(t, cc[1].ClusterID, "beta") - assert.Contains(t, cc[1].Monitors, "20.1.1.1:5000") - assert.Contains(t, cc[1].Monitors, "20.1.1.2:5000") - assert.Equal(t, len(cc[1].Monitors), 2) - - // does it return error on garbage input? - _, err = UpdateCsiClusterConfig("qqq", "beta", mons2) - assert.Error(t, err) -} diff --git a/pkg/operator/ceph/csi/csi.go b/pkg/operator/ceph/csi/csi.go deleted file mode 100644 index a9e2f0ee2..000000000 --- a/pkg/operator/ceph/csi/csi.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csi - -import ( - "strconv" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - controllerutil "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/kubernetes" -) - -func ValidateAndConfigureDrivers(context *clusterd.Context, namespace, rookImage, securityAccount string, serverVersion *version.Info, ownerInfo *k8sutil.OwnerInfo) { - csiLock.Lock() - defer csiLock.Unlock() - var ( - v *CephCSIVersion - err error - ) - - if err = setParams(context.Clientset); err != nil { - logger.Errorf("failed to configure CSI parameters. %v", err) - return - } - - if err = validateCSIParam(); err != nil { - logger.Errorf("failed to validate CSI parameters. %v", err) - return - } - - if !AllowUnsupported && CSIEnabled() { - if v, err = validateCSIVersion(context.Clientset, namespace, rookImage, securityAccount, ownerInfo); err != nil { - logger.Errorf("invalid csi version. %+v", err) - return - } - } else { - logger.Info("Skipping csi version check, since unsupported versions are allowed or csi is disabled") - } - - if CSIEnabled() { - maxRetries := 3 - for i := 0; i < maxRetries; i++ { - if err = startDrivers(context.Clientset, context.RookClientset, namespace, serverVersion, ownerInfo, v); err != nil { - logger.Errorf("failed to start Ceph csi drivers, will retry starting csi drivers %d more times. %v", maxRetries-i-1, err) - } else { - break - } - } - } - - stopDrivers(context.Clientset, namespace, serverVersion) -} - -func setParams(clientset kubernetes.Interface) error { - var err error - - csiEnableRBD, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_ENABLE_RBD", "true") - if err != nil { - return errors.Wrap(err, "unable to determine if CSI driver for RBD is enabled") - } - if EnableRBD, err = strconv.ParseBool(csiEnableRBD); err != nil { - return errors.Wrap(err, "unable to parse value for 'ROOK_CSI_ENABLE_RBD'") - } - - csiEnableCephFS, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_ENABLE_CEPHFS", "true") - if err != nil { - return errors.Wrap(err, "unable to determine if CSI driver for CephFS is enabled") - } - if EnableCephFS, err = strconv.ParseBool(csiEnableCephFS); err != nil { - return errors.Wrap(err, "unable to parse value for 'ROOK_CSI_ENABLE_CEPHFS'") - } - - csiAllowUnsupported, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_ALLOW_UNSUPPORTED_VERSION", "false") - if err != nil { - return errors.Wrap(err, "unable to determine if unsupported version is allowed") - } - if AllowUnsupported, err = strconv.ParseBool(csiAllowUnsupported); err != nil { - return errors.Wrap(err, "unable to parse value for 'ROOK_CSI_ALLOW_UNSUPPORTED_VERSION'") - } - - csiEnableCSIGRPCMetrics, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_ENABLE_GRPC_METRICS", "false") - if err != nil { - return errors.Wrap(err, "unable to determine if CSI GRPC metrics is enabled") - } - if EnableCSIGRPCMetrics, err = strconv.ParseBool(csiEnableCSIGRPCMetrics); err != nil { - return errors.Wrap(err, "unable to parse value for 'ROOK_CSI_ENABLE_GRPC_METRICS'") - } - - csiEnableCSIHostNetwork, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_ENABLE_HOST_NETWORK", "true") - if err != nil { - return errors.Wrap(err, "failed to determine if CSI Host 
Network is enabled") - } - if CSIParam.EnableCSIHostNetwork, err = strconv.ParseBool(csiEnableCSIHostNetwork); err != nil { - return errors.Wrap(err, "failed to parse value for 'CSI_ENABLE_HOST_NETWORK'") - } - - CSIParam.CSIPluginImage, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_CEPH_IMAGE", DefaultCSIPluginImage) - if err != nil { - return errors.Wrap(err, "unable to configure CSI plugin image") - } - CSIParam.RegistrarImage, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_REGISTRAR_IMAGE", DefaultRegistrarImage) - if err != nil { - return errors.Wrap(err, "unable to configure CSI registrar image") - } - CSIParam.ProvisionerImage, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_PROVISIONER_IMAGE", DefaultProvisionerImage) - if err != nil { - return errors.Wrap(err, "unable to configure CSI provisioner image") - } - CSIParam.AttacherImage, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_ATTACHER_IMAGE", DefaultAttacherImage) - if err != nil { - return errors.Wrap(err, "unable to configure CSI attacher image") - } - CSIParam.SnapshotterImage, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_SNAPSHOTTER_IMAGE", DefaultSnapshotterImage) - if err != nil { - return errors.Wrap(err, "unable to configure CSI snapshotter image") - } - CSIParam.KubeletDirPath, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_KUBELET_DIR_PATH", DefaultKubeletDirPath) - if err != nil { - return errors.Wrap(err, "unable to configure CSI kubelet directory path") - } - CSIParam.VolumeReplicationImage, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_VOLUME_REPLICATION_IMAGE", DefaultVolumeReplicationImage) - if err != nil { - return errors.Wrap(err, "unable to configure Volume replication controller image") - } - - csiCephFSPodLabels, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_CEPHFS_POD_LABELS", "") - if err != nil { - return errors.Wrap(err, "unable to configure CSI CephFS pod labels") - } - CSIParam.CSICephFSPodLabels = k8sutil.ParseStringToLabels(csiCephFSPodLabels) - - csiRBDPodLabels, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_RBD_POD_LABELS", "") - if err != nil { - return errors.Wrap(err, "unable to configure CSI RBD pod labels") - } - CSIParam.CSIRBDPodLabels = k8sutil.ParseStringToLabels(csiRBDPodLabels) - - return nil -} diff --git a/pkg/operator/ceph/csi/csidriver.go b/pkg/operator/ceph/csi/csidriver.go deleted file mode 100644 index 610fa5d60..000000000 --- a/pkg/operator/ceph/csi/csidriver.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csi - -import ( - "context" - - "github.com/pkg/errors" - v1k8scsi "k8s.io/api/storage/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/kubernetes/typed/storage/v1" -) - -type csiDriver interface { - createCSIDriverInfo(ctx context.Context, clientset kubernetes.Interface, name, fsGroupPolicy string) error - reCreateCSIDriverInfo(ctx context.Context) error - deleteCSIDriverInfo(ctx context.Context, clientset kubernetes.Interface, name string) error -} - -type v1CsiDriver struct { - csiDriver *v1k8scsi.CSIDriver - csiClient v1.CSIDriverInterface -} - -// createCSIDriverInfo Registers CSI driver by creating a CSIDriver object -func (d v1CsiDriver) createCSIDriverInfo(ctx context.Context, clientset kubernetes.Interface, name, fsGroupPolicy string) error { - attach := true - mountInfo := false - // Create CSIDriver object - csiDriver := &v1k8scsi.CSIDriver{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: v1k8scsi.CSIDriverSpec{ - AttachRequired: &attach, - PodInfoOnMount: &mountInfo, - }, - } - if fsGroupPolicy != "" { - policy := v1k8scsi.FSGroupPolicy(fsGroupPolicy) - csiDriver.Spec.FSGroupPolicy = &policy - } - csidrivers := clientset.StorageV1().CSIDrivers() - driver, err := csidrivers.Get(ctx, name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - _, err = csidrivers.Create(ctx, csiDriver, metav1.CreateOptions{}) - if err != nil { - return err - } - logger.Infof("CSIDriver object created for driver %q", name) - } - return err - } - - // As FSGroupPolicy field is immutable, should be set only during create time. - // if the request is to change the FSGroupPolicy, we are deleting the CSIDriver object and creating it. - if driver.Spec.FSGroupPolicy != nil && csiDriver.Spec.FSGroupPolicy != nil && *driver.Spec.FSGroupPolicy != *csiDriver.Spec.FSGroupPolicy { - d.csiClient = csidrivers - d.csiDriver = csiDriver - return d.reCreateCSIDriverInfo(ctx) - } - - // For csidriver we need to provide the resourceVersion when updating the object. 
- // From the docs (https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata) - // > "This value MUST be treated as opaque by clients and passed unmodified back to the server" - csiDriver.ObjectMeta.ResourceVersion = driver.ObjectMeta.ResourceVersion - _, err = csidrivers.Update(ctx, csiDriver, metav1.UpdateOptions{}) - if err != nil { - return err - } - logger.Infof("CSIDriver object updated for driver %q", name) - return nil -} - -func (d v1CsiDriver) reCreateCSIDriverInfo(ctx context.Context) error { - err := d.csiClient.Delete(ctx, d.csiDriver.Name, metav1.DeleteOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to delete CSIDriver object for driver %q", d.csiDriver.Name) - } - logger.Infof("CSIDriver object deleted for driver %q", d.csiDriver.Name) - _, err = d.csiClient.Create(ctx, d.csiDriver, metav1.CreateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to recreate CSIDriver object for driver %q", d.csiDriver.Name) - } - logger.Infof("CSIDriver object recreated for driver %q", d.csiDriver.Name) - return nil -} - -// deleteCSIDriverInfo deletes CSIDriverInfo and returns the error if any -func (d v1CsiDriver) deleteCSIDriverInfo(ctx context.Context, clientset kubernetes.Interface, name string) error { - err := clientset.StorageV1().CSIDrivers().Delete(ctx, name, metav1.DeleteOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - logger.Debug("%s CSIDriver not found; skipping deletion.", name) - return nil - } - } - return err -} diff --git a/pkg/operator/ceph/csi/secrets.go b/pkg/operator/ceph/csi/secrets.go deleted file mode 100644 index 103560790..000000000 --- a/pkg/operator/ceph/csi/secrets.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csi - -import ( - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// #nosec because of the word `Secret` -const ( - csiKeyringRBDProvisionerUsername = "client.csi-rbd-provisioner" - csiKeyringRBDNodeUsername = "client.csi-rbd-node" - CsiRBDNodeSecret = "rook-csi-rbd-node" - CsiRBDProvisionerSecret = "rook-csi-rbd-provisioner" -) - -// #nosec because of the word `Secret` -const ( - csiKeyringCephFSProvisionerUsername = "client.csi-cephfs-provisioner" - csiKeyringCephFSNodeUsername = "client.csi-cephfs-node" - CsiCephFSNodeSecret = "rook-csi-cephfs-node" - CsiCephFSProvisionerSecret = "rook-csi-cephfs-provisioner" -) - -func createCSIKeyringRBDNode(s *keyring.SecretStore) (string, error) { - key, err := s.GenerateKey(csiKeyringRBDNodeUsername, cephCSIKeyringRBDNodeCaps()) - if err != nil { - return "", err - } - - return key, nil -} - -func createCSIKeyringRBDProvisioner(s *keyring.SecretStore) (string, error) { - key, err := s.GenerateKey(csiKeyringRBDProvisionerUsername, cephCSIKeyringRBDProvisionerCaps()) - if err != nil { - return "", err - } - - return key, nil -} - -func createCSIKeyringCephFSNode(s *keyring.SecretStore) (string, error) { - key, err := s.GenerateKey(csiKeyringCephFSNodeUsername, cephCSIKeyringCephFSNodeCaps()) - if err != nil { - return "", err - } - - return key, nil -} - -func createCSIKeyringCephFSProvisioner(s *keyring.SecretStore) (string, error) { - key, err := s.GenerateKey(csiKeyringCephFSProvisionerUsername, cephCSIKeyringCephFSProvisionerCaps()) - if err != nil { - return "", err - } - - return key, nil -} - -func cephCSIKeyringRBDNodeCaps() []string { - return []string{ - "mon", "profile rbd", - "mgr", "allow rw", - "osd", "profile rbd", - } -} - -func cephCSIKeyringRBDProvisionerCaps() []string { - return []string{ - "mon", "profile rbd", - "mgr", "allow rw", - "osd", "profile rbd", - } -} - -func cephCSIKeyringCephFSNodeCaps() []string { - return []string{ - "mon", "allow r", - "mgr", "allow rw", - "osd", "allow rw tag cephfs *=*", - "mds", "allow rw", - } -} - -func cephCSIKeyringCephFSProvisionerCaps() []string { - return []string{ - "mon", "allow r", - "mgr", "allow rw", - "osd", "allow rw tag cephfs metadata=*", - } -} - -func createOrUpdateCSISecret(clusterInfo *client.ClusterInfo, csiRBDProvisionerSecretKey, csiRBDNodeSecretKey, csiCephFSProvisionerSecretKey, csiCephFSNodeSecretKey string, k *keyring.SecretStore) error { - csiRBDProvisionerSecrets := map[string][]byte{ - // userID is expected for the rbd provisioner driver - "userID": []byte("csi-rbd-provisioner"), - "userKey": []byte(csiRBDProvisionerSecretKey), - } - - csiRBDNodeSecrets := map[string][]byte{ - // userID is expected for the rbd node driver - "userID": []byte("csi-rbd-node"), - "userKey": []byte(csiRBDNodeSecretKey), - } - - csiCephFSProvisionerSecrets := map[string][]byte{ - // adminID is expected for the cephfs provisioner driver - "adminID": []byte("csi-cephfs-provisioner"), - "adminKey": []byte(csiCephFSProvisionerSecretKey), - } - - csiCephFSNodeSecrets := map[string][]byte{ - // adminID is expected for the cephfs node driver - "adminID": []byte("csi-cephfs-node"), - "adminKey": []byte(csiCephFSNodeSecretKey), - } - - keyringSecretMap := make(map[string]map[string][]byte) - keyringSecretMap[CsiRBDProvisionerSecret] = 
csiRBDProvisionerSecrets - keyringSecretMap[CsiRBDNodeSecret] = csiRBDNodeSecrets - keyringSecretMap[CsiCephFSProvisionerSecret] = csiCephFSProvisionerSecrets - keyringSecretMap[CsiCephFSNodeSecret] = csiCephFSNodeSecrets - - for secretName, secret := range keyringSecretMap { - s := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: clusterInfo.Namespace, - }, - Data: secret, - Type: k8sutil.RookType, - } - err := clusterInfo.OwnerInfo.SetControllerReference(s) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to keyring secret %q", secretName) - } - - // Create Kubernetes Secret - err = k.CreateSecret(s) - if err != nil { - return errors.Wrapf(err, "failed to create kubernetes secret %q for cluster %q", secret, clusterInfo.Namespace) - } - - } - - logger.Infof("created kubernetes csi secrets for cluster %q", clusterInfo.Namespace) - return nil -} - -// CreateCSISecrets creates all the Kubernetes CSI Secrets -func CreateCSISecrets(context *clusterd.Context, clusterInfo *client.ClusterInfo) error { - k := keyring.GetSecretStore(context, clusterInfo, clusterInfo.OwnerInfo) - - // Create CSI RBD Provisioner Ceph key - csiRBDProvisionerSecretKey, err := createCSIKeyringRBDProvisioner(k) - if err != nil { - return errors.Wrap(err, "failed to create csi rbd provisioner ceph keyring") - } - - // Create CSI RBD Node Ceph key - csiRBDNodeSecretKey, err := createCSIKeyringRBDNode(k) - if err != nil { - return errors.Wrap(err, "failed to create csi rbd node ceph keyring") - } - - // Create CSI Cephfs provisioner Ceph key - csiCephFSProvisionerSecretKey, err := createCSIKeyringCephFSProvisioner(k) - if err != nil { - return errors.Wrap(err, "failed to create csi cephfs provisioner ceph keyring") - } - - // Create CSI Cephfs node Ceph key - csiCephFSNodeSecretKey, err := createCSIKeyringCephFSNode(k) - if err != nil { - return errors.Wrap(err, "failed to create csi cephfs node ceph keyring") - } - - // Create or update Kubernetes CSI secret - if err := createOrUpdateCSISecret(clusterInfo, csiRBDProvisionerSecretKey, csiRBDNodeSecretKey, csiCephFSProvisionerSecretKey, csiCephFSNodeSecretKey, k); err != nil { - return errors.Wrap(err, "failed to create kubernetes csi secret") - } - - return nil -} diff --git a/pkg/operator/ceph/csi/secrets_test.go b/pkg/operator/ceph/csi/secrets_test.go deleted file mode 100644 index 3120ff23c..000000000 --- a/pkg/operator/ceph/csi/secrets_test.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csi - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCephCSIKeyringRBDNodeCaps(t *testing.T) { - caps := cephCSIKeyringRBDNodeCaps() - assert.Equal(t, caps, []string{"mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"}) -} - -func TestCephCSIKeyringRBDProvisionerCaps(t *testing.T) { - caps := cephCSIKeyringRBDProvisionerCaps() - assert.Equal(t, caps, []string{"mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"}) -} - -func TestCephCSIKeyringCephFSNodeCaps(t *testing.T) { - caps := cephCSIKeyringCephFSNodeCaps() - assert.Equal(t, caps, []string{"mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"}) -} - -func TestCephCSIKeyringCephFSProvisionerCaps(t *testing.T) { - caps := cephCSIKeyringCephFSProvisionerCaps() - assert.Equal(t, caps, []string{"mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"}) -} diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go deleted file mode 100644 index 3f1bab1e8..000000000 --- a/pkg/operator/ceph/csi/spec.go +++ /dev/null @@ -1,742 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package csi - -import ( - "context" - "fmt" - "strconv" - "strings" - "sync" - "time" - - rookclient "github.com/rook/rook/pkg/client/clientset/versioned" - controllerutil "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/k8sutil/cmdreporter" - - "github.com/pkg/errors" - apps "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - k8scsi "k8s.io/api/storage/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/kubernetes" -) - -type Param struct { - CSIPluginImage string - RegistrarImage string - ProvisionerImage string - AttacherImage string - SnapshotterImage string - ResizerImage string - DriverNamePrefix string - EnableCSIGRPCMetrics string - KubeletDirPath string - ForceCephFSKernelClient string - CephFSPluginUpdateStrategy string - RBDPluginUpdateStrategy string - PluginPriorityClassName string - ProvisionerPriorityClassName string - VolumeReplicationImage string - EnableCSIHostNetwork bool - EnableOMAPGenerator bool - EnableRBDSnapshotter bool - EnableCephFSSnapshotter bool - EnableVolumeReplicationSideCar bool - LogLevel uint8 - CephFSGRPCMetricsPort uint16 - CephFSLivenessMetricsPort uint16 - RBDGRPCMetricsPort uint16 - RBDLivenessMetricsPort uint16 - ProvisionerReplicas uint8 - CSICephFSPodLabels map[string]string - CSIRBDPodLabels map[string]string -} - -type templateParam struct { - Param - // non-global template only parameters - Namespace string -} - -var ( - CSIParam Param - - EnableRBD = false - EnableCephFS = false - EnableCSIGRPCMetrics = false - AllowUnsupported = false - - //driver names - CephFSDriverName string - RBDDriverName string - - // template paths - RBDPluginTemplatePath string - RBDProvisionerDepTemplatePath string - - 
CephFSPluginTemplatePath string - CephFSProvisionerDepTemplatePath string - - // configuration map for csi - ConfigName = "rook-ceph-csi-config" - ConfigKey = "csi-cluster-config-json" - - csiLock sync.Mutex - csiDriverobj csiDriver -) - -// Specify default images as var instead of const so that they can be overridden with the Go -// linker's -X flag. This allows users to easily build images with a different opinionated set of -// images without having to specify them manually in charts/manifests which can make upgrades more -// manually challenging. -var ( - // image names - DefaultCSIPluginImage = "quay.io/cephcsi/cephcsi:v3.4.0" - DefaultRegistrarImage = "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" - DefaultProvisionerImage = "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" - DefaultAttacherImage = "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" - DefaultSnapshotterImage = "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" - DefaultResizerImage = "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" - DefaultVolumeReplicationImage = "quay.io/csiaddons/volumereplication-operator:v0.1.0" -) - -const ( - KubeMinMajor = "1" - ProvDeploymentSuppVersion = "14" - kubeMinVerForFilesystemRestore = "15" - kubeMinVerForBlockRestore = "16" - kubeMinVerForSnapshot = "17" - kubeMinVerForV1csiDriver = "18" - kubeMaxVerForBeta1csiDriver = "21" - - // common tolerations and node affinity - provisionerTolerationsEnv = "CSI_PROVISIONER_TOLERATIONS" - provisionerNodeAffinityEnv = "CSI_PROVISIONER_NODE_AFFINITY" - pluginTolerationsEnv = "CSI_PLUGIN_TOLERATIONS" - pluginNodeAffinityEnv = "CSI_PLUGIN_NODE_AFFINITY" - - // CephFS tolerations and node affinity - cephFSProvisionerTolerationsEnv = "CSI_CEPHFS_PROVISIONER_TOLERATIONS" - cephFSProvisionerNodeAffinityEnv = "CSI_CEPHFS_PROVISIONER_NODE_AFFINITY" - cephFSPluginTolerationsEnv = "CSI_CEPHFS_PLUGIN_TOLERATIONS" - cephFSPluginNodeAffinityEnv = "CSI_CEPHFS_PLUGIN_NODE_AFFINITY" - - // RBD tolerations and node affinity - rbdProvisionerTolerationsEnv = "CSI_RBD_PROVISIONER_TOLERATIONS" - rbdProvisionerNodeAffinityEnv = "CSI_RBD_PROVISIONER_NODE_AFFINITY" - rbdPluginTolerationsEnv = "CSI_RBD_PLUGIN_TOLERATIONS" - rbdPluginNodeAffinityEnv = "CSI_RBD_PLUGIN_NODE_AFFINITY" - - // compute resource for CSI pods - rbdProvisionerResource = "CSI_RBD_PROVISIONER_RESOURCE" - rbdPluginResource = "CSI_RBD_PLUGIN_RESOURCE" - - cephFSProvisionerResource = "CSI_CEPHFS_PROVISIONER_RESOURCE" - cephFSPluginResource = "CSI_CEPHFS_PLUGIN_RESOURCE" - - // kubelet directory path - DefaultKubeletDirPath = "/var/lib/kubelet" - - // template - DefaultRBDPluginTemplatePath = "/etc/ceph-csi/rbd/csi-rbdplugin.yaml" - DefaultRBDProvisionerDepTemplatePath = "/etc/ceph-csi/rbd/csi-rbdplugin-provisioner-dep.yaml" - DefaultRBDPluginServiceTemplatePath = "/etc/ceph-csi/rbd/csi-rbdplugin-svc.yaml" - - DefaultCephFSPluginTemplatePath = "/etc/ceph-csi/cephfs/csi-cephfsplugin.yaml" - DefaultCephFSProvisionerDepTemplatePath = "/etc/ceph-csi/cephfs/csi-cephfsplugin-provisioner-dep.yaml" - DefaultCephFSPluginServiceTemplatePath = "/etc/ceph-csi/cephfs/csi-cephfsplugin-svc.yaml" - - // grpc metrics and liveness port for cephfs and rbd - DefaultCephFSGRPCMerticsPort uint16 = 9091 - DefaultCephFSLivenessMerticsPort uint16 = 9081 - DefaultRBDGRPCMerticsPort uint16 = 9090 - DefaultRBDLivenessMerticsPort uint16 = 9080 - - detectCSIVersionName = "rook-ceph-csi-detect-version" - // default log level for csi containers - defaultLogLevel uint8 = 0 - - // update strategy - rollingUpdate = "RollingUpdate" - 
onDelete = "OnDelete" - - // driver daemonset names - csiRBDPlugin = "csi-rbdplugin" - csiCephFSPlugin = "csi-cephfsplugin" - - // driver deployment names - csiRBDProvisioner = "csi-rbdplugin-provisioner" - csiCephFSProvisioner = "csi-cephfsplugin-provisioner" -) - -func CSIEnabled() bool { - return EnableRBD || EnableCephFS -} - -func validateCSIParam() error { - - if len(CSIParam.CSIPluginImage) == 0 { - return errors.New("missing csi rbd plugin image") - } - if len(CSIParam.RegistrarImage) == 0 { - return errors.New("missing csi registrar image") - } - if len(CSIParam.ProvisionerImage) == 0 { - return errors.New("missing csi provisioner image") - } - if len(CSIParam.AttacherImage) == 0 { - return errors.New("missing csi attacher image") - } - - if EnableRBD { - if len(RBDPluginTemplatePath) == 0 { - return errors.New("missing rbd plugin template path") - } - if len(RBDProvisionerDepTemplatePath) == 0 { - return errors.New("missing rbd provisioner template path") - } - } - - if EnableCephFS { - if len(CephFSPluginTemplatePath) == 0 { - return errors.New("missing cephfs plugin template path") - } - if len(CephFSProvisionerDepTemplatePath) == 0 { - return errors.New("missing ceph provisioner template path") - } - } - return nil -} - -func startDrivers(clientset kubernetes.Interface, rookclientset rookclient.Interface, namespace string, ver *version.Info, ownerInfo *k8sutil.OwnerInfo, v *CephCSIVersion) error { - ctx := context.TODO() - var ( - err error - rbdPlugin, cephfsPlugin *apps.DaemonSet - rbdProvisionerDeployment, cephfsProvisionerDeployment *apps.Deployment - rbdService, cephfsService *corev1.Service - ) - - tp := templateParam{ - Param: CSIParam, - Namespace: namespace, - } - // if the user didn't specify a custom DriverNamePrefix use - // the namespace (and a dot). - if tp.DriverNamePrefix == "" { - tp.DriverNamePrefix = fmt.Sprintf("%s.", namespace) - } - - CephFSDriverName = tp.DriverNamePrefix + "cephfs.csi.ceph.com" - RBDDriverName = tp.DriverNamePrefix + "rbd.csi.ceph.com" - - csiDriverobj = beta1CsiDriver{} - if ver.Major > KubeMinMajor || ver.Major == KubeMinMajor && ver.Minor >= kubeMinVerForV1csiDriver { - csiDriverobj = v1CsiDriver{} - // In case of an k8s version upgrade, delete the beta CSIDriver object; - // before the creation of updated v1 object to avoid conflicts. - // Also, attempt betav1 driver object deletion only if version is less - // than maximum supported version for betav1 object.(unavailable in v1.22+) - // Ignore if not found. - if EnableRBD && ver.Minor <= kubeMaxVerForBeta1csiDriver { - err = beta1CsiDriver{}.deleteCSIDriverInfo(ctx, clientset, RBDDriverName) - if err != nil { - logger.Errorf("failed to delete %q Driver Info. %v", RBDDriverName, err) - } - } - if EnableCephFS && ver.Minor <= kubeMaxVerForBeta1csiDriver { - err = beta1CsiDriver{}.deleteCSIDriverInfo(ctx, clientset, CephFSDriverName) - if err != nil { - logger.Errorf("failed to delete %q Driver Info. 
%v", CephFSDriverName, err) - } - } - } - - tp.EnableCSIGRPCMetrics = fmt.Sprintf("%t", EnableCSIGRPCMetrics) - - // If not set or set to anything but "false", the kernel client will be enabled - kClient, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_FORCE_CEPHFS_KERNEL_CLIENT", "true") - if err != nil { - return errors.Wrap(err, "failed to load CSI_FORCE_CEPHFS_KERNEL_CLIENT setting") - } - if strings.EqualFold(kClient, "false") { - tp.ForceCephFSKernelClient = "false" - } else { - tp.ForceCephFSKernelClient = "true" - } - // parse GRPC and Liveness ports - tp.CephFSGRPCMetricsPort, err = getPortFromConfig(clientset, "CSI_CEPHFS_GRPC_METRICS_PORT", DefaultCephFSGRPCMerticsPort) - if err != nil { - return errors.Wrap(err, "error getting CSI CephFS GRPC metrics port.") - } - tp.CephFSLivenessMetricsPort, err = getPortFromConfig(clientset, "CSI_CEPHFS_LIVENESS_METRICS_PORT", DefaultCephFSLivenessMerticsPort) - if err != nil { - return errors.Wrap(err, "error getting CSI CephFS liveness metrics port.") - } - - tp.RBDGRPCMetricsPort, err = getPortFromConfig(clientset, "CSI_RBD_GRPC_METRICS_PORT", DefaultRBDGRPCMerticsPort) - if err != nil { - return errors.Wrap(err, "error getting CSI RBD GRPC metrics port.") - } - tp.RBDLivenessMetricsPort, err = getPortFromConfig(clientset, "CSI_RBD_LIVENESS_METRICS_PORT", DefaultRBDLivenessMerticsPort) - if err != nil { - return errors.Wrap(err, "error getting CSI RBD liveness metrics port.") - } - - // default value `system-node-critical` is the highest available priority - tp.PluginPriorityClassName, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_PLUGIN_PRIORITY_CLASSNAME", "") - if err != nil { - return errors.Wrap(err, "failed to load CSI_PLUGIN_PRIORITY_CLASSNAME setting") - } - - // default value `system-cluster-critical` is applied for some - // critical pods in cluster but less priority than plugin pods - tp.ProvisionerPriorityClassName, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_PROVISIONER_PRIORITY_CLASSNAME", "") - if err != nil { - return errors.Wrap(err, "failed to load CSI_PROVISIONER_PRIORITY_CLASSNAME setting") - } - - // OMAP generator will be enabled by default - // If AllowUnsupported is set to false and if CSI version is less than - // <3.2.0 disable OMAP generator sidecar - if !v.SupportsOMAPController() { - tp.EnableOMAPGenerator = false - } - - enableOMAPGenerator, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_ENABLE_OMAP_GENERATOR", "false") - if err != nil { - return errors.Wrap(err, "failed to load CSI_ENABLE_OMAP_GENERATOR setting") - } - if strings.EqualFold(enableOMAPGenerator, "true") { - tp.EnableOMAPGenerator = true - } - - // if k8s >= v1.17 enable RBD and CephFS snapshotter by default - if ver.Major == KubeMinMajor && ver.Minor >= kubeMinVerForSnapshot { - tp.EnableRBDSnapshotter = true - tp.EnableCephFSSnapshotter = true - } - enableRBDSnapshotter, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_ENABLE_RBD_SNAPSHOTTER", "true") - if err != nil { - return errors.Wrap(err, "failed to load CSI_ENABLE_RBD_SNAPSHOTTER setting") - } - if strings.EqualFold(enableRBDSnapshotter, "false") { - tp.EnableRBDSnapshotter = false - } - enableCephFSSnapshotter, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_ENABLE_CEPHFS_SNAPSHOTTER", "true") - if err 
!= nil { - return errors.Wrap(err, "failed to load CSI_ENABLE_CEPHFS_SNAPSHOTTER setting") - } - if strings.EqualFold(enableCephFSSnapshotter, "false") { - tp.EnableCephFSSnapshotter = false - } - - tp.EnableVolumeReplicationSideCar = false - enableVolumeReplicationSideCar, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_ENABLE_VOLUME_REPLICATION", "false") - if err != nil { - return errors.Wrap(err, "failed to load CSI_ENABLE_VOLUME_REPLICATION setting") - } - if strings.EqualFold(enableVolumeReplicationSideCar, "true") { - tp.EnableVolumeReplicationSideCar = true - } - - updateStrategy, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY", rollingUpdate) - if err != nil { - return errors.Wrap(err, "failed to load CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY setting") - } - if strings.EqualFold(updateStrategy, onDelete) { - tp.CephFSPluginUpdateStrategy = onDelete - } else { - tp.CephFSPluginUpdateStrategy = rollingUpdate - } - - updateStrategy, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_RBD_PLUGIN_UPDATE_STRATEGY", rollingUpdate) - if err != nil { - return errors.Wrap(err, "failed to load CSI_RBD_PLUGIN_UPDATE_STRATEGY setting") - } - if strings.EqualFold(updateStrategy, onDelete) { - tp.RBDPluginUpdateStrategy = onDelete - } else { - tp.RBDPluginUpdateStrategy = rollingUpdate - } - - logger.Infof("Kubernetes version is %s.%s", ver.Major, ver.Minor) - - tp.ResizerImage, err = k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "ROOK_CSI_RESIZER_IMAGE", DefaultResizerImage) - if err != nil { - return errors.Wrap(err, "failed to load ROOK_CSI_RESIZER_IMAGE setting") - } - if tp.ResizerImage == "" { - tp.ResizerImage = DefaultResizerImage - } - - if ver.Major == KubeMinMajor && ver.Minor < kubeMinVerForFilesystemRestore { - logger.Warning("CSI Filesystem volume expansion requires Kubernetes version >=1.15.0") - } - if ver.Major == KubeMinMajor && ver.Minor < kubeMinVerForBlockRestore { - logger.Warning("CSI Block volume expansion requires Kubernetes version >=1.16.0") - } - - logLevel, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_LOG_LEVEL", "") - if err != nil { - // logging a warning and intentionally continuing with the default log level - logger.Warningf("failed to load CSI_LOG_LEVEL. Defaulting to %d. %v", defaultLogLevel, err) - } - tp.LogLevel = defaultLogLevel - if logLevel != "" { - l, err := strconv.ParseUint(logLevel, 10, 8) - if err != nil { - logger.Errorf("failed to parse CSI_LOG_LEVEL. Defaulting to %d. %v", defaultLogLevel, err) - } else { - tp.LogLevel = uint8(l) - } - } - - tp.ProvisionerReplicas = 2 - nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err == nil { - if len(nodes.Items) == 1 { - tp.ProvisionerReplicas = 1 - } - } else { - logger.Errorf("failed to get nodes. Defaulting the number of replicas of provisioner pods to 2. 
%v", err) - } - - if EnableRBD { - rbdPlugin, err = templateToDaemonSet("rbdplugin", RBDPluginTemplatePath, tp) - if err != nil { - return errors.Wrap(err, "failed to load rbdplugin template") - } - - rbdProvisionerDeployment, err = templateToDeployment("rbd-provisioner", RBDProvisionerDepTemplatePath, tp) - if err != nil { - return errors.Wrap(err, "failed to load rbd provisioner deployment template") - } - - rbdService, err = templateToService("rbd-service", DefaultRBDPluginServiceTemplatePath, tp) - if err != nil { - return errors.Wrap(err, "failed to load rbd plugin service template") - } - rbdService.Namespace = namespace - } - if EnableCephFS { - cephfsPlugin, err = templateToDaemonSet("cephfsplugin", CephFSPluginTemplatePath, tp) - if err != nil { - return errors.Wrap(err, "failed to load CephFS plugin template") - } - - cephfsProvisionerDeployment, err = templateToDeployment("cephfs-provisioner", CephFSProvisionerDepTemplatePath, tp) - if err != nil { - return errors.Wrap(err, "failed to load rbd provisioner deployment template") - } - - cephfsService, err = templateToService("cephfs-service", DefaultCephFSPluginServiceTemplatePath, tp) - if err != nil { - return errors.Wrap(err, "failed to load cephfs plugin service template") - } - cephfsService.Namespace = namespace - } - - // get common provisioner tolerations and node affinity - provisionerTolerations := getToleration(clientset, provisionerTolerationsEnv, []corev1.Toleration{}) - provisionerNodeAffinity := getNodeAffinity(clientset, provisionerNodeAffinityEnv, &corev1.NodeAffinity{}) - // get common plugin tolerations and node affinity - pluginTolerations := getToleration(clientset, pluginTolerationsEnv, []corev1.Toleration{}) - pluginNodeAffinity := getNodeAffinity(clientset, pluginNodeAffinityEnv, &corev1.NodeAffinity{}) - - if rbdPlugin != nil { - // get RBD plugin tolerations and node affinity, defaults to common tolerations and node affinity if not specified - rbdPluginTolerations := getToleration(clientset, rbdPluginTolerationsEnv, pluginTolerations) - rbdPluginNodeAffinity := getNodeAffinity(clientset, rbdPluginNodeAffinityEnv, pluginNodeAffinity) - // apply RBD plugin tolerations and node affinity - applyToPodSpec(&rbdPlugin.Spec.Template.Spec, rbdPluginNodeAffinity, rbdPluginTolerations) - // apply resource request and limit to rbdplugin containers - applyResourcesToContainers(clientset, rbdPluginResource, &rbdPlugin.Spec.Template.Spec) - err = ownerInfo.SetControllerReference(rbdPlugin) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to rbd plugin daemonset %q", rbdPlugin.Name) - } - multusApplied, err := applyCephClusterNetworkConfig(ctx, &rbdPlugin.Spec.Template.ObjectMeta, rookclientset) - if err != nil { - return errors.Wrapf(err, "failed to apply network config to rbd plugin daemonset: %+v", rbdPlugin) - } - if multusApplied { - rbdPlugin.Spec.Template.Spec.HostNetwork = false - } - err = k8sutil.CreateDaemonSet(csiRBDPlugin, namespace, clientset, rbdPlugin) - if err != nil { - return errors.Wrapf(err, "failed to start rbdplugin daemonset: %+v", rbdPlugin) - } - k8sutil.AddRookVersionLabelToDaemonSet(rbdPlugin) - } - - if rbdProvisionerDeployment != nil { - // get RBD provisioner tolerations and node affinity, defaults to common tolerations and node affinity if not specified - rbdProvisionerTolerations := getToleration(clientset, rbdProvisionerTolerationsEnv, provisionerTolerations) - rbdProvisionerNodeAffinity := getNodeAffinity(clientset, rbdProvisionerNodeAffinityEnv, 
provisionerNodeAffinity) - // apply RBD provisioner tolerations and node affinity - applyToPodSpec(&rbdProvisionerDeployment.Spec.Template.Spec, rbdProvisionerNodeAffinity, rbdProvisionerTolerations) - // apply resource request and limit to rbd provisioner containers - applyResourcesToContainers(clientset, rbdProvisionerResource, &rbdProvisionerDeployment.Spec.Template.Spec) - err = ownerInfo.SetControllerReference(rbdProvisionerDeployment) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to rbd provisioner deployment %q", rbdProvisionerDeployment.Name) - } - antiAffinity := GetPodAntiAffinity("app", csiRBDProvisioner) - rbdProvisionerDeployment.Spec.Template.Spec.Affinity.PodAntiAffinity = &antiAffinity - rbdProvisionerDeployment.Spec.Strategy = apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - } - - _, err = applyCephClusterNetworkConfig(ctx, &rbdProvisionerDeployment.Spec.Template.ObjectMeta, rookclientset) - if err != nil { - return errors.Wrapf(err, "failed to apply network config to rbd plugin provisioner deployment: %+v", rbdProvisionerDeployment) - } - _, err = k8sutil.CreateOrUpdateDeployment(clientset, rbdProvisionerDeployment) - if err != nil { - return errors.Wrapf(err, "failed to start rbd provisioner deployment: %+v", rbdProvisionerDeployment) - } - k8sutil.AddRookVersionLabelToDeployment(rbdProvisionerDeployment) - logger.Info("successfully started CSI Ceph RBD driver") - } - - if rbdService != nil { - rbdService.Namespace = namespace - err = ownerInfo.SetControllerReference(rbdService) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to rbd service %q", rbdService) - } - _, err = k8sutil.CreateOrUpdateService(clientset, namespace, rbdService) - if err != nil { - return errors.Wrapf(err, "failed to create rbd service: %+v", rbdService) - } - } - - if cephfsPlugin != nil { - // get CephFS plugin tolerations and node affinity, defaults to common tolerations and node affinity if not specified - cephFSPluginTolerations := getToleration(clientset, cephFSPluginTolerationsEnv, pluginTolerations) - cephFSPluginNodeAffinity := getNodeAffinity(clientset, cephFSPluginNodeAffinityEnv, pluginNodeAffinity) - // apply CephFS plugin tolerations and node affinity - applyToPodSpec(&cephfsPlugin.Spec.Template.Spec, cephFSPluginNodeAffinity, cephFSPluginTolerations) - // apply resource request and limit to cephfs plugin containers - applyResourcesToContainers(clientset, cephFSPluginResource, &cephfsPlugin.Spec.Template.Spec) - err = ownerInfo.SetControllerReference(cephfsPlugin) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to cephfs plugin daemonset %q", cephfsPlugin.Name) - } - multusApplied, err := applyCephClusterNetworkConfig(ctx, &cephfsPlugin.Spec.Template.ObjectMeta, rookclientset) - if err != nil { - return errors.Wrapf(err, "failed to apply network config to cephfs plugin daemonset: %+v", cephfsPlugin) - } - if multusApplied { - cephfsPlugin.Spec.Template.Spec.HostNetwork = false - } - err = k8sutil.CreateDaemonSet(csiCephFSPlugin, namespace, clientset, cephfsPlugin) - if err != nil { - return errors.Wrapf(err, "failed to start cephfs plugin daemonset: %+v", cephfsPlugin) - } - k8sutil.AddRookVersionLabelToDaemonSet(cephfsPlugin) - } - - if cephfsProvisionerDeployment != nil { - // get CephFS provisioner tolerations and node affinity, defaults to common tolerations and node affinity if not specified - cephFSProvisionerTolerations := getToleration(clientset, 
cephFSProvisionerTolerationsEnv, provisionerTolerations) - cephFSProvisionerNodeAffinity := getNodeAffinity(clientset, cephFSProvisionerNodeAffinityEnv, provisionerNodeAffinity) - // apply CephFS provisioner tolerations and node affinity - applyToPodSpec(&cephfsProvisionerDeployment.Spec.Template.Spec, cephFSProvisionerNodeAffinity, cephFSProvisionerTolerations) - // get resource details for cephfs provisioner - // apply resource request and limit to cephfs provisioner containers - applyResourcesToContainers(clientset, cephFSProvisionerResource, &cephfsProvisionerDeployment.Spec.Template.Spec) - err = ownerInfo.SetControllerReference(cephfsProvisionerDeployment) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to cephfs provisioner deployment %q", cephfsProvisionerDeployment.Name) - } - antiAffinity := GetPodAntiAffinity("app", csiCephFSProvisioner) - cephfsProvisionerDeployment.Spec.Template.Spec.Affinity.PodAntiAffinity = &antiAffinity - cephfsProvisionerDeployment.Spec.Strategy = apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - } - - _, err = applyCephClusterNetworkConfig(ctx, &cephfsProvisionerDeployment.Spec.Template.ObjectMeta, rookclientset) - if err != nil { - return errors.Wrapf(err, "failed to apply network config to cephfs plugin provisioner deployment: %+v", cephfsProvisionerDeployment) - } - _, err = k8sutil.CreateOrUpdateDeployment(clientset, cephfsProvisionerDeployment) - if err != nil { - return errors.Wrapf(err, "failed to start cephfs provisioner deployment: %+v", cephfsProvisionerDeployment) - } - k8sutil.AddRookVersionLabelToDeployment(cephfsProvisionerDeployment) - logger.Info("successfully started CSI CephFS driver") - } - if cephfsService != nil { - err = ownerInfo.SetControllerReference(cephfsService) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to cephfs service %q", cephfsService) - } - _, err = k8sutil.CreateOrUpdateService(clientset, namespace, cephfsService) - if err != nil { - return errors.Wrapf(err, "failed to create cephfs service: %+v", cephfsService) - } - } - - if EnableRBD { - fsGroupPolicyForRBD, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_RBD_FSGROUPPOLICY", string(k8scsi.ReadWriteOnceWithFSTypeFSGroupPolicy)) - if err != nil { - // logging a warning and intentionally continuing with the default log level - logger.Warningf("failed to parse CSI_RBD_FSGROUPPOLICY. Defaulting to %q. %v", k8scsi.ReadWriteOnceWithFSTypeFSGroupPolicy, err) - } - err = csiDriverobj.createCSIDriverInfo(ctx, clientset, RBDDriverName, fsGroupPolicyForRBD) - if err != nil { - return errors.Wrapf(err, "failed to create CSI driver object for %q", RBDDriverName) - } - } - if EnableCephFS { - fsGroupPolicyForCephFS, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_CEPHFS_FSGROUPPOLICY", string(k8scsi.ReadWriteOnceWithFSTypeFSGroupPolicy)) - if err != nil { - // logging a warning and intentionally continuing with the default - // log level - logger.Warningf("failed to parse CSI_CEPHFS_FSGROUPPOLICY. Defaulting to %q. 
%v", k8scsi.NoneFSGroupPolicy, err) - } - err = csiDriverobj.createCSIDriverInfo(ctx, clientset, CephFSDriverName, fsGroupPolicyForCephFS) - if err != nil { - return errors.Wrapf(err, "failed to create CSI driver object for %q", CephFSDriverName) - } - } - - return nil -} - -func stopDrivers(clientset kubernetes.Interface, namespace string, ver *version.Info) { - if !EnableRBD { - logger.Info("CSI Ceph RBD driver disabled") - succeeded := deleteCSIDriverResources(clientset, ver, namespace, csiRBDPlugin, csiRBDProvisioner, "csi-rbdplugin-metrics", RBDDriverName) - if succeeded { - logger.Info("successfully removed CSI Ceph RBD driver") - } else { - logger.Error("failed to remove CSI Ceph RBD driver") - } - } - - if !EnableCephFS { - logger.Info("CSI CephFS driver disabled") - succeeded := deleteCSIDriverResources(clientset, ver, namespace, csiCephFSPlugin, csiCephFSProvisioner, "csi-cephfsplugin-metrics", CephFSDriverName) - if succeeded { - logger.Info("successfully removed CSI CephFS driver") - } else { - logger.Error("failed to remove CSI CephFS driver") - } - } -} - -func deleteCSIDriverResources( - clientset kubernetes.Interface, ver *version.Info, namespace, daemonset, deployment, service, driverName string) bool { - ctx := context.TODO() - succeeded := true - csiDriverobj = beta1CsiDriver{} - if ver.Major > KubeMinMajor || ver.Major == KubeMinMajor && ver.Minor >= kubeMinVerForV1csiDriver { - csiDriverobj = v1CsiDriver{} - } - err := k8sutil.DeleteDaemonset(clientset, namespace, daemonset) - if err != nil { - logger.Errorf("failed to delete the %q. %v", daemonset, err) - succeeded = false - } - - err = k8sutil.DeleteDeployment(clientset, namespace, deployment) - if err != nil { - logger.Errorf("failed to delete the %q. %v", deployment, err) - succeeded = false - } - - err = k8sutil.DeleteService(clientset, namespace, service) - if err != nil { - logger.Errorf("failed to delete the %q. %v", service, err) - succeeded = false - } - - err = csiDriverobj.deleteCSIDriverInfo(ctx, clientset, driverName) - if err != nil { - logger.Errorf("failed to delete %q Driver Info. 
%v", driverName, err) - succeeded = false - } - return succeeded -} - -func applyCephClusterNetworkConfig(ctx context.Context, objectMeta *metav1.ObjectMeta, rookclientset rookclient.Interface) (bool, error) { - var isMultusApplied bool - cephClusters, err := rookclientset.CephV1().CephClusters(objectMeta.Namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return false, errors.Wrap(err, "failed to find CephClusters") - } - for _, cephCluster := range cephClusters.Items { - if cephCluster.Spec.Network.IsMultus() { - err = k8sutil.ApplyMultus(cephCluster.Spec.Network, objectMeta) - if err != nil { - return false, errors.Wrapf(err, "failed to apply multus configuration to CephCluster %q", cephCluster.Name) - } - isMultusApplied = true - } - } - - return isMultusApplied, nil -} - -// ValidateCSIVersion checks if the configured ceph-csi image is supported -func validateCSIVersion(clientset kubernetes.Interface, namespace, rookImage, serviceAccountName string, ownerInfo *k8sutil.OwnerInfo) (*CephCSIVersion, error) { - timeout := 15 * time.Minute - - logger.Infof("detecting the ceph csi image version for image %q", CSIParam.CSIPluginImage) - - versionReporter, err := cmdreporter.New( - clientset, - ownerInfo, - detectCSIVersionName, detectCSIVersionName, namespace, - []string{"cephcsi"}, []string{"--version"}, - rookImage, CSIParam.CSIPluginImage) - - if err != nil { - return nil, errors.Wrap(err, "failed to set up ceph CSI version job") - } - - job := versionReporter.Job() - job.Spec.Template.Spec.ServiceAccountName = serviceAccountName - - // Apply csi provisioner toleration for csi version check job - job.Spec.Template.Spec.Tolerations = getToleration(clientset, provisionerTolerationsEnv, []corev1.Toleration{}) - stdout, _, retcode, err := versionReporter.Run(timeout) - if err != nil { - return nil, errors.Wrap(err, "failed to complete ceph CSI version job") - } - - if retcode != 0 { - return nil, errors.Errorf("ceph CSI version job returned %d", retcode) - } - - version, err := extractCephCSIVersion(stdout) - if err != nil { - return nil, errors.Wrap(err, "failed to extract ceph CSI version") - } - logger.Infof("Detected ceph CSI image version: %q", version) - - if !version.Supported() { - return nil, errors.Errorf("ceph CSI image needs to be at least version %q", minimum.String()) - } - return version, nil -} diff --git a/pkg/operator/ceph/csi/spec_test.go b/pkg/operator/ceph/csi/spec_test.go deleted file mode 100644 index 272fcd417..000000000 --- a/pkg/operator/ceph/csi/spec_test.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csi - -import ( - "testing" - - rookfake "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/test" - - "github.com/stretchr/testify/assert" -) - -func TestStartCSI(t *testing.T) { - RBDPluginTemplatePath = "csi-rbdplugin.yaml" - RBDProvisionerDepTemplatePath = "csi-rbdplugin-provisioner-dep.yaml" - CephFSPluginTemplatePath = "csi-cephfsplugin.yaml" - CephFSProvisionerDepTemplatePath = "csi-cephfsplugin-provisioner-dep.yaml" - - CSIParam = Param{ - CSIPluginImage: "image", - RegistrarImage: "image", - ProvisionerImage: "image", - AttacherImage: "image", - SnapshotterImage: "image", - } - clientset := test.New(t, 3) - context := &clusterd.Context{ - Clientset: clientset, - RookClientset: rookfake.NewSimpleClientset(), - } - serverVersion, err := clientset.Discovery().ServerVersion() - if err != nil { - assert.Nil(t, err) - } - AllowUnsupported = true - err = startDrivers(context.Clientset, context.RookClientset, "ns", serverVersion, nil, nil) - assert.Nil(t, err) -} diff --git a/pkg/operator/ceph/csi/util.go b/pkg/operator/ceph/csi/util.go deleted file mode 100644 index 4d3b2bf01..000000000 --- a/pkg/operator/ceph/csi/util.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csi - -import ( - "bytes" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - "text/template" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/ceph/controller" - k8sutil "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -func loadTemplate(name, templatePath string, p templateParam) (string, error) { - b, err := ioutil.ReadFile(filepath.Clean(templatePath)) - if err != nil { - return "", err - } - data := string(b) - var writer bytes.Buffer - t := template.New(name) - t, err = t.Parse(data) - if err != nil { - return "", errors.Wrapf(err, "failed to parse template %v", name) - } - err = t.Execute(&writer, p) - return writer.String(), err -} - -func templateToService(name, templatePath string, p templateParam) (*corev1.Service, error) { - var svc corev1.Service - t, err := loadTemplate(name, templatePath, p) - if err != nil { - return nil, errors.Wrap(err, "failed to load service template") - } - - err = yaml.Unmarshal([]byte(t), &svc) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal service template") - } - return &svc, nil -} - -func templateToDaemonSet(name, templatePath string, p templateParam) (*apps.DaemonSet, error) { - var ds apps.DaemonSet - t, err := loadTemplate(name, templatePath, p) - if err != nil { - return nil, errors.Wrap(err, "failed to load daemonset template") - } - - err = yaml.Unmarshal([]byte(t), &ds) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal daemonset template") - } - return &ds, nil -} - -func templateToDeployment(name, templatePath string, p templateParam) (*apps.Deployment, error) { - var ds apps.Deployment - t, err := loadTemplate(name, templatePath, p) - if err != nil { - return nil, errors.Wrap(err, "failed to load deployment template") - } - - err = yaml.Unmarshal([]byte(t), &ds) - if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal deployment template") - } - return &ds, nil -} - -func applyResourcesToContainers(clientset kubernetes.Interface, key string, podspec *corev1.PodSpec) { - resource := getComputeResource(clientset, key) - if len(resource) > 0 { - for i, c := range podspec.Containers { - for _, r := range resource { - if c.Name == r.Name { - podspec.Containers[i].Resources = r.Resource - } - } - } - } -} - -func getComputeResource(clientset kubernetes.Interface, key string) []k8sutil.ContainerResource { - // Add Resource list if any - resource := []k8sutil.ContainerResource{} - resourceRaw := "" - var err error - - resourceRaw, err = k8sutil.GetOperatorSetting(clientset, controller.OperatorSettingConfigMapName, key, "") - - if err != nil { - logger.Warningf("resource requirement for %q will not be applied. %v", key, err) - } - - if resourceRaw != "" { - resource, err = k8sutil.YamlToContainerResource(resourceRaw) - if err != nil { - logger.Warningf("failed to parse %q. %v", resourceRaw, err) - } - } - return resource -} - -func getToleration(clientset kubernetes.Interface, tolerationsName string, defaultTolerations []corev1.Toleration) []corev1.Toleration { - // Add toleration if any, otherwise return defaultTolerations - tolerationsRaw, err := k8sutil.GetOperatorSetting(clientset, controller.OperatorSettingConfigMapName, tolerationsName, "") - if err != nil { - logger.Warningf("failed to read %q. 
%v", tolerationsName, err) - return defaultTolerations - } - if tolerationsRaw == "" { - return defaultTolerations - } - tolerations, err := k8sutil.YamlToTolerations(tolerationsRaw) - if err != nil { - logger.Warningf("failed to parse %q for %q. %v", tolerationsRaw, tolerationsName, err) - return defaultTolerations - } - for i := range tolerations { - if tolerations[i].Key == "" { - tolerations[i].Operator = corev1.TolerationOpExists - } - - if tolerations[i].Operator == corev1.TolerationOpExists { - tolerations[i].Value = "" - } - } - return tolerations -} - -func getNodeAffinity(clientset kubernetes.Interface, nodeAffinityName string, defaultNodeAffinity *corev1.NodeAffinity) *corev1.NodeAffinity { - // Add NodeAffinity if any, otherwise return defaultNodeAffinity - nodeAffinity, err := k8sutil.GetOperatorSetting(clientset, controller.OperatorSettingConfigMapName, nodeAffinityName, "") - if err != nil { - logger.Warningf("failed to read %q. %v", nodeAffinityName, err) - return defaultNodeAffinity - } - if nodeAffinity == "" { - return defaultNodeAffinity - } - v1NodeAffinity, err := k8sutil.GenerateNodeAffinity(nodeAffinity) - if err != nil { - logger.Warningf("failed to parse %q for %q. %v", nodeAffinity, nodeAffinityName, err) - return defaultNodeAffinity - } - return v1NodeAffinity -} - -func applyToPodSpec(pod *corev1.PodSpec, n *corev1.NodeAffinity, t []corev1.Toleration) { - pod.Tolerations = t - pod.Affinity = &corev1.Affinity{ - NodeAffinity: n, - } -} - -func getPortFromConfig(clientset kubernetes.Interface, env string, defaultPort uint16) (uint16, error) { - port, err := k8sutil.GetOperatorSetting(clientset, controller.OperatorSettingConfigMapName, env, strconv.Itoa(int(defaultPort))) - if err != nil { - return defaultPort, errors.Wrapf(err, "failed to load value for %q.", env) - } - if strings.TrimSpace(port) == "" { - return defaultPort, nil - } - p, err := strconv.ParseUint(port, 10, 64) - if err != nil { - return defaultPort, errors.Wrapf(err, "failed to parse port value for %q.", env) - } - if p > 65535 { - return defaultPort, errors.Errorf("%s port value is greater than 65535 for %s.", port, env) - } - return uint16(p), nil -} - -// Get PodAntiAffinity from a key and value pair -func GetPodAntiAffinity(key, value string) corev1.PodAntiAffinity { - return corev1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ - { - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: key, - Operator: metav1.LabelSelectorOpIn, - Values: []string{value}, - }, - }, - }, - TopologyKey: corev1.LabelHostname, - }, - }, - } -} diff --git a/pkg/operator/ceph/csi/util_test.go b/pkg/operator/ceph/csi/util_test.go deleted file mode 100644 index d211afd97..000000000 --- a/pkg/operator/ceph/csi/util_test.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csi - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "k8s.io/client-go/kubernetes/fake" -) - -var ( - testDSTemplate = []byte(` -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: test-label - namespace: {{ .Namespace }} -spec: - selector: - matchLabels: - app: test-label - template: - metadata: - labels: - app: test-label - spec: - serviceAccount: test-sa - containers: - - name: registrar - image: {{ .RegistrarImage }} - - name: rbdplugin - image: {{ .CSIPluginImage }} - - name: cephfsplugin - image: {{ .CSIPluginImage }} -`) - testDepTemplate = []byte(` -kind: Deployment -apiVersion: apps/v1 -metadata: - name: test-label - namespace: {{ .Namespace }} -spec: - replicas: {{ .ProvisionerReplicas }} - selector: - matchLabels: - app: test-label - template: - metadata: - labels: - app: test-label - spec: - serviceAccount: test-sa - containers: - - name: csi-attacher - image: {{ .AttacherImage }} - - name: csi-snapshotter - image: {{ .SnapshotterImage }} - - name: csi-resizer - image: {{ .ResizerImage }} - - name: csi-provisioner - image: {{ .ProvisionerImage }} - - name: csi-cephfsplugin - image: {{ .CSIPluginImage }} -`) -) - -func TestDaemonSetTemplate(t *testing.T) { - tmp, err := ioutil.TempFile("", "yaml") - assert.Nil(t, err) - - defer os.Remove(tmp.Name()) - - _, err = tmp.Write(testDSTemplate) - assert.Nil(t, err) - err = tmp.Close() - assert.Nil(t, err) - - tp := templateParam{ - Param: CSIParam, - Namespace: "foo", - } - _, err = templateToDaemonSet("test-ds", tmp.Name(), tp) - assert.Nil(t, err) -} - -func TestDeploymentTemplate(t *testing.T) { - tmp, err := ioutil.TempFile("", "yaml") - assert.Nil(t, err) - - defer os.Remove(tmp.Name()) - - _, err = tmp.Write(testDepTemplate) - assert.Nil(t, err) - err = tmp.Close() - assert.Nil(t, err) - - tp := templateParam{ - Param: CSIParam, - Namespace: "foo", - } - _, err = templateToDeployment("test-dep", tmp.Name(), tp) - assert.Nil(t, err) -} - -func Test_getPortFromConfig(t *testing.T) { - k8s := fake.NewSimpleClientset() - - var key = "TEST_CSI_PORT_ENV" - var defaultPort uint16 = 8000 - - // empty env variable - port, err := getPortFromConfig(k8s, key, defaultPort) - assert.Nil(t, err) - assert.Equal(t, port, defaultPort) - - // valid port is set in env - err = os.Setenv(key, "9000") - assert.Nil(t, err) - port, err = getPortFromConfig(k8s, key, defaultPort) - assert.Nil(t, err) - assert.Equal(t, port, uint16(9000)) - - err = os.Unsetenv(key) - assert.Nil(t, err) - // higher port value is set in env - err = os.Setenv(key, "65536") - assert.Nil(t, err) - port, err = getPortFromConfig(k8s, key, defaultPort) - assert.Error(t, err) - assert.Equal(t, port, defaultPort) - - err = os.Unsetenv(key) - assert.Nil(t, err) - // negative port is set in env - err = os.Setenv(key, "-1") - assert.Nil(t, err) - port, err = getPortFromConfig(k8s, key, defaultPort) - assert.Error(t, err) - assert.Equal(t, port, defaultPort) - - err = os.Unsetenv(key) - assert.Nil(t, err) -} diff --git a/pkg/operator/ceph/csi/version.go b/pkg/operator/ceph/csi/version.go deleted file mode 100644 index 685a488f2..000000000 --- a/pkg/operator/ceph/csi/version.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package csi - -import ( - "fmt" - "regexp" - "strconv" - - "github.com/pkg/errors" -) - -var ( - //minimum supported version is 3.0.0 - minimum = CephCSIVersion{3, 0, 0} - //supportedCSIVersions are versions that rook supports - releasev310 = CephCSIVersion{3, 1, 0} - releasev320 = CephCSIVersion{3, 2, 0} - releasev330 = CephCSIVersion{3, 3, 0} - releasev340 = CephCSIVersion{3, 4, 0} - supportedCSIVersions = []CephCSIVersion{ - minimum, - releasev310, - releasev320, - releasev330, - releasev340, - } - // omap generator is supported in v3.2.0+ - omapSupportedVersions = releasev320 - // for parsing the output of `cephcsi` - versionCSIPattern = regexp.MustCompile(`v(\d+)\.(\d+)\.(\d+)`) -) - -// CephCSIVersion represents the Ceph CSI version format -type CephCSIVersion struct { - Major int - Minor int - Bugfix int -} - -func (v *CephCSIVersion) String() string { - return fmt.Sprintf("v%d.%d.%d", - v.Major, v.Minor, v.Bugfix) -} - -// SupportsOMAPController checks if the detected version supports OMAP generator -func (v *CephCSIVersion) SupportsOMAPController() bool { - - // if AllowUnsupported is set also a csi-image greater than the supported ones are allowed - if AllowUnsupported { - return true - } - - if !v.isAtLeast(&minimum) { - return false - } - - if v.Major > omapSupportedVersions.Major { - return true - } - if v.Major == omapSupportedVersions.Major { - if v.Minor > omapSupportedVersions.Minor { - return true - } - if v.Minor == omapSupportedVersions.Minor { - return v.Bugfix >= omapSupportedVersions.Bugfix - } - } - - return false -} - -// Supported checks if the detected version is part of the known supported CSI versions -func (v *CephCSIVersion) Supported() bool { - if !v.isAtLeast(&minimum) { - return false - } - - // if AllowUnsupported is set also a csi-image greater than the supported ones are allowed - if AllowUnsupported { - return true - } - for _, sv := range supportedCSIVersions { - if v.Major == sv.Major { - if v.Minor == sv.Minor { - if v.Bugfix >= sv.Bugfix { - return true - } - } - } - } - return false -} - -func (v *CephCSIVersion) isAtLeast(version *CephCSIVersion) bool { - if v.Major > version.Major { - return true - } - if v.Major == version.Major && v.Minor >= version.Minor { - if v.Minor > version.Minor { - return true - } - if v.Bugfix >= version.Bugfix { - return true - } - } - return false -} - -// extractCephCSIVersion extracts the major, minor and extra digit of a Ceph CSI release -func extractCephCSIVersion(src string) (*CephCSIVersion, error) { - m := versionCSIPattern.FindStringSubmatch(src) - if m == nil || len(m) < 3 { - return nil, errors.Errorf("failed to parse version from: %q", CSIParam.CSIPluginImage) - } - - major, err := strconv.Atoi(m[1]) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse version major part: %q", m[0]) - } - - minor, err := strconv.Atoi(m[2]) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse version minor part: %q", m[1]) - } - - bugfix, err := strconv.Atoi(m[3]) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse version bugfix part: %q", m[2]) - } - - return 
&CephCSIVersion{major, minor, bugfix}, nil -} diff --git a/pkg/operator/ceph/csi/version_test.go b/pkg/operator/ceph/csi/version_test.go deleted file mode 100644 index f09dacf1a..000000000 --- a/pkg/operator/ceph/csi/version_test.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package csi - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -var ( - testMinVersion = CephCSIVersion{2, 0, 0} - testReleaseV210 = CephCSIVersion{2, 1, 0} - testReleaseV300 = CephCSIVersion{3, 0, 0} - testReleaseV320 = CephCSIVersion{3, 2, 0} - testReleaseV321 = CephCSIVersion{3, 2, 1} - testReleaseV330 = CephCSIVersion{3, 3, 0} - testReleaseV340 = CephCSIVersion{3, 4, 0} - testVersionUnsupported = CephCSIVersion{4, 0, 0} -) - -func TestIsAtLeast(t *testing.T) { - // Test version which is smaller - var version = CephCSIVersion{1, 40, 10} - ret := testMinVersion.isAtLeast(&version) - assert.Equal(t, true, ret) - - // Test version which is equal - ret = testMinVersion.isAtLeast(&testMinVersion) - assert.Equal(t, true, ret) - - // Test version which is greater (minor) - version = CephCSIVersion{2, 1, 0} - ret = testMinVersion.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test version which is greater (bugfix) - version = CephCSIVersion{2, 2, 0} - ret = testMinVersion.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test for v2.1.0 - // Test version which is greater (bugfix) - version = CephCSIVersion{2, 0, 1} - ret = testReleaseV210.isAtLeast(&version) - assert.Equal(t, true, ret) - - // Test version which is equal - ret = testReleaseV210.isAtLeast(&testReleaseV210) - assert.Equal(t, true, ret) - - // Test version which is greater (minor) - version = CephCSIVersion{2, 1, 1} - ret = testReleaseV210.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test version which is greater (bugfix) - version = CephCSIVersion{2, 2, 0} - ret = testReleaseV210.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test for 3.0.0 - // Test version which is equal - ret = testReleaseV300.isAtLeast(&testReleaseV300) - assert.Equal(t, true, ret) - - // Test for 3.3.0 - // Test version which is lesser - ret = testReleaseV330.isAtLeast(&testReleaseV300) - assert.Equal(t, true, ret) - - // Test for 3.4.0 - // Test version which is lesser - ret = testReleaseV340.isAtLeast(&testReleaseV330) - assert.Equal(t, true, ret) - - // Test version which is greater (minor) - version = CephCSIVersion{3, 1, 1} - ret = testReleaseV300.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test version which is greater (bugfix) - version = CephCSIVersion{3, 2, 0} - ret = testReleaseV300.isAtLeast(&version) - assert.Equal(t, false, ret) -} - -func TestSupported(t *testing.T) { - AllowUnsupported = false - ret := testMinVersion.Supported() - assert.Equal(t, false, ret) - - ret = testVersionUnsupported.Supported() - assert.Equal(t, false, ret) - - ret = testReleaseV340.Supported() - assert.Equal(t, true, ret) -} - -func 
TestSupportOMAPController(t *testing.T) { - AllowUnsupported = true - ret := testMinVersion.SupportsOMAPController() - assert.True(t, ret) - - AllowUnsupported = false - ret = testMinVersion.SupportsOMAPController() - assert.False(t, ret) - - ret = testReleaseV300.SupportsOMAPController() - assert.False(t, ret) - - ret = testReleaseV320.SupportsOMAPController() - assert.True(t, ret) - - ret = testReleaseV321.SupportsOMAPController() - assert.True(t, ret) - - ret = testReleaseV330.SupportsOMAPController() - assert.True(t, ret) -} -func Test_extractCephCSIVersion(t *testing.T) { - expectedVersion := CephCSIVersion{3, 0, 0} - csiString := []byte(`Cephcsi Version: v3.0.0 - Git Commit: e58d537a07ca0184f67d33db85bf6b4911624b44 - Go Version: go1.12.15 - Compiler: gc - Platform: linux/amd64 - `) - version, err := extractCephCSIVersion(string(csiString)) - - assert.Equal(t, &expectedVersion, version) - assert.Nil(t, err) - - csiString = []byte(`Cephcsi Version: rubish - Git Commit: e58d537a07ca0184f67d33db85bf6b4911624b44 - Go Version: go1.12.15 - Compiler: gc - Platform: linux/amd64 - `) - version, err = extractCephCSIVersion(string(csiString)) - - assert.Nil(t, version) - assert.Contains(t, err.Error(), "failed to parse version from") -} diff --git a/pkg/operator/ceph/disruption/clusterdisruption/add.go b/pkg/operator/ceph/disruption/clusterdisruption/add.go deleted file mode 100644 index 1adf15580..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/add.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterdisruption - -import ( - "reflect" - - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/k8sutil" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/types" -) - -// Add adds a new Controller to the Manager based on clusterdisruption.ReconcileClusterDisruption and registers the relevant watches and handlers. -// Read more about how Managers, Controllers, and their Watches, Handlers, Predicates, etc work here: -// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg -func Add(mgr manager.Manager, context *controllerconfig.Context) error { - - // Add the cephv1 scheme to the manager scheme - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - return errors.Wrap(err, "failed to add ceph scheme to manager scheme") - } - - // This will be used to associate namespaces and cephclusters. 
- sharedClusterMap := &ClusterMap{} - - reconcileClusterDisruption := &ReconcileClusterDisruption{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - clusterMap: sharedClusterMap, - } - reconciler := reconcile.Reconciler(reconcileClusterDisruption) - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: reconciler}) - if err != nil { - return err - } - - cephClusterPredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - logger.Info("create event from ceph cluster CR") - return true - }, - UpdateFunc: func(e event.UpdateEvent) bool { - oldCluster, ok := e.ObjectOld.DeepCopyObject().(*cephv1.CephCluster) - if !ok { - return false - } - newCluster, ok := e.ObjectNew.DeepCopyObject().(*cephv1.CephCluster) - if !ok { - return false - } - return !reflect.DeepEqual(oldCluster.Spec, newCluster.Spec) - }, - } - - // Watch for CephClusters - err = c.Watch(&source.Kind{Type: &cephv1.CephCluster{}}, &handler.EnqueueRequestForObject{}, cephClusterPredicate) - if err != nil { - return err - } - - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(reconcileClusterDisruption.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - - // Only reconcile for PDB update event when allowed disruptions for the main OSD PDB is 0. - // This means that one of the OSD is down due to node drain or any other reason - pdbPredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - // Do not reconcile when PDB is created - return false - }, - UpdateFunc: func(e event.UpdateEvent) bool { - if usePDBV1Beta1 { - pdb, ok := e.ObjectNew.DeepCopyObject().(*policyv1beta1.PodDisruptionBudget) - if !ok { - return false - } - return pdb.Name == osdPDBAppName && pdb.Status.DisruptionsAllowed == 0 - } - pdb, ok := e.ObjectNew.DeepCopyObject().(*policyv1.PodDisruptionBudget) - if !ok { - return false - } - return pdb.Name == osdPDBAppName && pdb.Status.DisruptionsAllowed == 0 - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Do not reconcile when PDB is deleted - return false - }, - } - - // Watch for main PodDisruptionBudget and enqueue the CephCluster in the namespace - if usePDBV1Beta1 { - err = c.Watch( - &source.Kind{Type: &policyv1beta1.PodDisruptionBudget{}}, - handler.EnqueueRequestsFromMapFunc(handler.MapFunc(func(obj client.Object) []reconcile.Request { - pdb, ok := obj.(*policyv1beta1.PodDisruptionBudget) - if !ok { - // Not a pdb, returning empty - logger.Error("PDB handler received non-PDB") - return []reconcile.Request{} - } - namespace := pdb.GetNamespace() - req := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace}} - return []reconcile.Request{req} - }), - ), - pdbPredicate, - ) - if err != nil { - return err - } - } else { - err = c.Watch( - &source.Kind{Type: &policyv1.PodDisruptionBudget{}}, - handler.EnqueueRequestsFromMapFunc(handler.MapFunc(func(obj client.Object) []reconcile.Request { - pdb, ok := obj.(*policyv1.PodDisruptionBudget) - if !ok { - // Not a pdb, returning empty - logger.Error("PDB handler received non-PDB") - return []reconcile.Request{} - } - namespace := pdb.GetNamespace() - req := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace}} - return []reconcile.Request{req} - }), - ), - pdbPredicate, - ) - if err != nil { - return err - } - } - - // enqueues with an empty name that is populated by the reconciler. 
- // There is a one-per-namespace limit on CephClusters - enqueueByNamespace := handler.EnqueueRequestsFromMapFunc(handler.MapFunc(func(obj client.Object) []reconcile.Request { - // The name will be populated in the reconcile - namespace := obj.GetNamespace() - if len(namespace) == 0 { - logger.Errorf("enqueueByNamespace received an obj without a namespace. %+v", obj) - return []reconcile.Request{} - } - req := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace}} - return []reconcile.Request{req} - }), - ) - - // Watch for CephBlockPools and enqueue the CephCluster in the namespace - err = c.Watch(&source.Kind{Type: &cephv1.CephBlockPool{}}, enqueueByNamespace) - if err != nil { - return err - } - - // Watch for CephFileSystems and enqueue the CephCluster in the namespace - err = c.Watch(&source.Kind{Type: &cephv1.CephFilesystem{}}, enqueueByNamespace) - if err != nil { - return err - } - - // Watch for CephObjectStores and enqueue the CephCluster in the namespace - err = c.Watch(&source.Kind{Type: &cephv1.CephObjectStore{}}, enqueueByNamespace) - if err != nil { - return err - } - - return nil -} diff --git a/pkg/operator/ceph/disruption/clusterdisruption/doc.go b/pkg/operator/ceph/disruption/clusterdisruption/doc.go deleted file mode 100644 index bd32ebbff..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package clusterdisruption implements the controller for ensuring that drains occus in a safe manner. -The design and purpose for clusterdisruption management is found at: -https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md -*/ -package clusterdisruption diff --git a/pkg/operator/ceph/disruption/clusterdisruption/osd.go b/pkg/operator/ceph/disruption/clusterdisruption/osd.go deleted file mode 100644 index 85792ca4f..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/osd.go +++ /dev/null @@ -1,647 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clusterdisruption - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/pkg/errors" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" -) - -const ( - // osdPDBAppName is that app label value for pdbs targeting osds - osdPDBAppName = "rook-ceph-osd" - drainingFailureDomainKey = "draining-failure-domain" - drainingFailureDomainDurationKey = "draining-failure-domain-duration" - setNoOut = "set-no-out" - pgHealthCheckDurationKey = "pg-health-check-duration" - // DefaultMaintenanceTimeout is the period for which a drained failure domain will remain in noout - DefaultMaintenanceTimeout = 30 * time.Minute - nooutFlag = "noout" -) - -func (r *ReconcileClusterDisruption) createPDB(pdb client.Object) error { - err := r.client.Create(context.TODO(), pdb) - if err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create pdb %q", pdb.GetName()) - } - return nil -} - -func (r *ReconcileClusterDisruption) deletePDB(pdb client.Object) error { - err := r.client.Delete(context.TODO(), pdb) - if err != nil && !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to delete pdb %q", pdb.GetName()) - } - return nil -} - -// createDefaultPDBforOSD creates a single PDB for all OSDs with maxUnavailable=1 -// This allows all OSDs in a single failure domain to go down. -func (r *ReconcileClusterDisruption) createDefaultPDBforOSD(namespace string) error { - cephCluster, ok := r.clusterMap.GetCluster(namespace) - if !ok { - return errors.Errorf("failed to find the namespace %q in the clustermap", namespace) - } - pdbRequest := types.NamespacedName{Name: osdPDBAppName, Namespace: namespace} - objectMeta := metav1.ObjectMeta{ - Name: osdPDBAppName, - Namespace: namespace, - } - selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{k8sutil.AppAttr: osdPDBAppName}, - } - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - if usePDBV1Beta1 { - pdb := &policyv1beta1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ - MaxUnavailable: &intstr.IntOrString{IntVal: 1}, - Selector: selector, - }, - } - ownerInfo := k8sutil.NewOwnerInfo(cephCluster, r.scheme) - err := ownerInfo.SetControllerReference(pdb) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to pdb %v", pdb) - } - - err = r.client.Get(context.TODO(), pdbRequest, &policyv1beta1.PodDisruptionBudget{}) - if err != nil { - if apierrors.IsNotFound(err) { - logger.Info("all PGs are active+clean. 
Restoring default OSD pdb settings") - logger.Infof("creating the default pdb %q with maxUnavailable=1 for all osd", osdPDBAppName) - return r.createPDB(pdb) - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.Name) - } - return nil - } - pdb := &policyv1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - Spec: policyv1.PodDisruptionBudgetSpec{ - MaxUnavailable: &intstr.IntOrString{IntVal: 1}, - Selector: selector, - }, - } - ownerInfo := k8sutil.NewOwnerInfo(cephCluster, r.scheme) - err = ownerInfo.SetControllerReference(pdb) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to pdb %v", pdb) - } - - err = r.client.Get(context.TODO(), pdbRequest, &policyv1.PodDisruptionBudget{}) - if err != nil { - if apierrors.IsNotFound(err) { - logger.Info("all PGs are active+clean. Restoring default OSD pdb settings") - logger.Infof("creating the default pdb %q with maxUnavailable=1 for all osd", osdPDBAppName) - return r.createPDB(pdb) - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.Name) - } - return nil -} - -func (r *ReconcileClusterDisruption) deleteDefaultPDBforOSD(namespace string) error { - pdbRequest := types.NamespacedName{Name: osdPDBAppName, Namespace: namespace} - objectMeta := metav1.ObjectMeta{ - Name: osdPDBAppName, - Namespace: namespace, - } - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - if usePDBV1Beta1 { - pdb := &policyv1beta1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - } - err := r.client.Get(context.TODO(), pdbRequest, &policyv1beta1.PodDisruptionBudget{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.Name) - } - logger.Infof("deleting the default pdb %q with maxUnavailable=1 for all osd", osdPDBAppName) - return r.deletePDB(pdb) - } - pdb := &policyv1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - } - err = r.client.Get(context.TODO(), pdbRequest, &policyv1.PodDisruptionBudget{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.Name) - } - logger.Infof("deleting the default pdb %q with maxUnavailable=1 for all osd", osdPDBAppName) - return r.deletePDB(pdb) -} - -// createBlockingPDBForOSD creates individual blocking PDBs (maxUnavailable=0) for all the OSDs in -// failure domains that are not draining -func (r *ReconcileClusterDisruption) createBlockingPDBForOSD(namespace, failureDomainType, failureDomainName string) error { - cephCluster, ok := r.clusterMap.GetCluster(namespace) - if !ok { - return errors.Errorf("failed to find the namespace %q in the clustermap", namespace) - } - - pdbName := getPDBName(failureDomainType, failureDomainName) - pdbRequest := types.NamespacedName{Name: pdbName, Namespace: namespace} - objectMeta := metav1.ObjectMeta{ - Name: pdbName, - Namespace: namespace, - } - selector := &metav1.LabelSelector{ - MatchLabels: map[string]string{fmt.Sprintf(osd.TopologyLocationLabel, failureDomainType): failureDomainName}, - } - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - if usePDBV1Beta1 { - pdb := &policyv1beta1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ - MaxUnavailable: &intstr.IntOrString{IntVal: 0}, - Selector: selector, - }, - } - ownerInfo := 
k8sutil.NewOwnerInfo(cephCluster, r.scheme) - err := ownerInfo.SetControllerReference(pdb) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to pdb %v", pdb) - } - err = r.client.Get(context.TODO(), pdbRequest, &policyv1beta1.PodDisruptionBudget{}) - if err != nil { - if apierrors.IsNotFound(err) { - logger.Infof("creating temporary blocking pdb %q with maxUnavailable=0 for %q failure domain %q", pdbName, failureDomainType, failureDomainName) - return r.createPDB(pdb) - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.Name) - } - return nil - } - pdb := &policyv1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - Spec: policyv1.PodDisruptionBudgetSpec{ - MaxUnavailable: &intstr.IntOrString{IntVal: 0}, - Selector: selector, - }, - } - ownerInfo := k8sutil.NewOwnerInfo(cephCluster, r.scheme) - err = ownerInfo.SetControllerReference(pdb) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to pdb %v", pdb) - } - err = r.client.Get(context.TODO(), pdbRequest, &policyv1.PodDisruptionBudget{}) - if err != nil { - if apierrors.IsNotFound(err) { - logger.Infof("creating temporary blocking pdb %q with maxUnavailable=0 for %q failure domain %q", pdbName, failureDomainType, failureDomainName) - return r.createPDB(pdb) - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.Name) - } - return nil -} - -func (r *ReconcileClusterDisruption) deleteBlockingPDBForOSD(namespace, failureDomainType, failureDomainName string) error { - pdbName := getPDBName(failureDomainType, failureDomainName) - pdbRequest := types.NamespacedName{Name: pdbName, Namespace: namespace} - objectMeta := metav1.ObjectMeta{ - Name: pdbName, - Namespace: namespace, - } - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - if usePDBV1Beta1 { - pdb := &policyv1beta1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - } - err := r.client.Get(context.TODO(), pdbRequest, &policyv1beta1.PodDisruptionBudget{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.Name) - } - logger.Infof("deleting temporary blocking pdb with %q with maxUnavailable=0 for %q failure domain %q", pdbName, failureDomainType, failureDomainName) - return r.deletePDB(pdb) - } - pdb := &policyv1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - } - err = r.client.Get(context.TODO(), pdbRequest, &policyv1.PodDisruptionBudget{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.Name) - } - logger.Infof("deleting temporary blocking pdb with %q with maxUnavailable=0 for %q failure domain %q", pdbName, failureDomainType, failureDomainName) - return r.deletePDB(pdb) -} - -func (r *ReconcileClusterDisruption) initializePDBState(request reconcile.Request) (*corev1.ConfigMap, error) { - pdbStateMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: pdbStateMapName, - Namespace: request.Namespace, - }, - } - pdbStateMapRequest := types.NamespacedName{ - Name: pdbStateMapName, - Namespace: request.Namespace, - } - err := r.client.Get(context.TODO(), pdbStateMapRequest, pdbStateMap) - - if apierrors.IsNotFound(err) { - // create configmap to track the draining failure domain - pdbStateMap.Data = map[string]string{drainingFailureDomainKey: "", setNoOut: ""} - err := r.client.Create(context.TODO(), pdbStateMap) - if err != nil { - return pdbStateMap, 
errors.Wrapf(err, "failed to create the PDB state map %q", pdbStateMapRequest) - } - } else if err != nil { - return pdbStateMap, errors.Wrapf(err, "failed to get the pdbStateMap %s", pdbStateMapRequest) - } - return pdbStateMap, nil -} - -func (r *ReconcileClusterDisruption) reconcilePDBsForOSDs( - clusterInfo *cephclient.ClusterInfo, - request reconcile.Request, - pdbStateMap *corev1.ConfigMap, - failureDomainType string, - allFailureDomains, - osdDownFailureDomains []string, - activeNodeDrains bool, -) (reconcile.Result, error) { - var osdDown bool - var drainingFailureDomain string - if len(osdDownFailureDomains) > 0 { - osdDown = true - drainingFailureDomain = osdDownFailureDomains[0] - } - - pgHealthMsg, pgClean, err := cephclient.IsClusterClean(r.context.ClusterdContext, clusterInfo) - if err != nil { - // If the error contains that message, this means the cluster is not up and running - // No monitors are present and thus no ceph configuration has been created - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Infof("Ceph %q cluster not ready, cannot check Ceph status yet.", request.Namespace) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil - } - return reconcile.Result{}, errors.Wrapf(err, "failed to check cluster health") - } - - switch { - // osd is down but pgs are active+clean - case osdDown && pgClean: - lastDrainTimeStamp, err := getLastDrainTimeStamp(pdbStateMap, drainingFailureDomainDurationKey) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to get last drain timestamp from the configmap %q", pdbStateMap.Name) - } - timeSinceOSDDown := time.Since(lastDrainTimeStamp) - if timeSinceOSDDown > 30*time.Second { - logger.Infof("osd is down in failure domain %q is down for the last %.2f minutes, but pgs are active+clean", drainingFailureDomain, timeSinceOSDDown.Minutes()) - resetPDBConfig(pdbStateMap) - } else { - logger.Infof("osd is down in the failure domain %q, but pgs are active+clean. Requeuing in case pg status is not updated yet...", drainingFailureDomain) - return reconcile.Result{Requeue: true, RequeueAfter: 15 * time.Second}, nil - } - - // osd is down and pgs are not healthy - case osdDown && !pgClean: - logger.Infof("osd is down in failure domain %q and pgs are not active+clean. pg health: %q", drainingFailureDomain, pgHealthMsg) - currentlyDrainingFD, ok := pdbStateMap.Data[drainingFailureDomainKey] - if !ok || drainingFailureDomain != currentlyDrainingFD { - pdbStateMap.Data[drainingFailureDomainKey] = drainingFailureDomain - pdbStateMap.Data[drainingFailureDomainDurationKey] = time.Now().Format(time.RFC3339) - } - if activeNodeDrains { - pdbStateMap.Data[setNoOut] = "true" - } - - // osd is back up and either pgs have become healthy or pg healthy check timeout has elapsed - case !osdDown && (pgClean || r.hasPGHealthCheckTimedout(pdbStateMap)): - // reset the configMap if cluster is clean or if the timeout for PGs to become active+clean has exceeded - logger.Debugf("no OSD is down in the %q failure domains: %v. pg health: %q", failureDomainType, allFailureDomains, pgHealthMsg) - resetPDBConfig(pdbStateMap) - - default: - logger.Infof("all %q failure domains: %v. osd is down in failure domain: %q. active node drains: %t. 
pg health: %q", failureDomainType, - allFailureDomains, drainingFailureDomain, activeNodeDrains, pgHealthMsg) - } - - if pdbStateMap.Data[setNoOut] == "true" { - err = r.updateNoout(clusterInfo, pdbStateMap, allFailureDomains) - if err != nil { - logger.Errorf("failed to update maintenance noout in cluster %q. %v", request, err) - } - } - - if pdbStateMap.Data[drainingFailureDomainKey] != "" && !pgClean { - // delete default OSD pdb and create blocking OSD pdbs - err := r.handleActiveDrains(allFailureDomains, pdbStateMap.Data[drainingFailureDomainKey], failureDomainType, clusterInfo.Namespace, pgClean) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to handle active drains") - } - } else if pdbStateMap.Data[drainingFailureDomainKey] == "" { - // delete all blocking OSD pdb and restore the default OSD pdb - err := r.handleInactiveDrains(allFailureDomains, failureDomainType, clusterInfo.Namespace) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to handle inactive drains") - } - // reset `set-no-out` flag on the configMap - pdbStateMap.Data[setNoOut] = "" - } - - err = r.client.Update(context.TODO(), pdbStateMap) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to update configMap %q in cluster %q", pdbStateMapName, request) - } - - // requeue if drain is still in progress - if len(pdbStateMap.Data[drainingFailureDomainKey]) > 0 { - return reconcile.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileClusterDisruption) handleActiveDrains(allFailureDomains []string, drainingFailureDomain, - failureDomainType, namespace string, isClean bool) error { - - for _, failureDomainName := range allFailureDomains { - // create blocking PDB for failure domains not currently draining - if failureDomainName != drainingFailureDomain { - err := r.createBlockingPDBForOSD(namespace, failureDomainType, failureDomainName) - if err != nil { - return errors.Wrapf(err, "failed to create blocking pdb for %q failure domain %q", failureDomainType, failureDomainName) - } - } else { - if isClean { - err := r.deleteBlockingPDBForOSD(namespace, failureDomainType, failureDomainName) - if err != nil { - return errors.Wrapf(err, "failed to delete pdb for %q failure domain %q. %v", failureDomainType, failureDomainName, err) - } - } - } - } - - // delete the default PDB for OSD - // This will allow all OSDs in the currently drained failure domain to be removed. - logger.Debug("deleting default pdb with maxUnavailable=1 for all osd") - err := r.deleteDefaultPDBforOSD(namespace) - if err != nil { - return errors.Wrap(err, "failed to delete the default osd pdb") - } - return nil -} - -func (r *ReconcileClusterDisruption) handleInactiveDrains(allFailureDomains []string, failureDomainType, namespace string) error { - err := r.createDefaultPDBforOSD(namespace) - if err != nil { - return errors.Wrap(err, "failed to create default pdb") - } - for _, failureDomainName := range allFailureDomains { - err := r.deleteBlockingPDBForOSD(namespace, failureDomainType, failureDomainName) - if err != nil { - return errors.Wrapf(err, "failed to delete pdb for %q failure domain %q. 
%v", failureDomainType, failureDomainName, err) - } - logger.Debugf("deleted temporary blocking pdb for %q failure domain %q.", failureDomainType, failureDomainName) - } - return nil -} - -func (r *ReconcileClusterDisruption) updateNoout(clusterInfo *cephclient.ClusterInfo, pdbStateMap *corev1.ConfigMap, allFailureDomains []string) error { - drainingFailureDomain := pdbStateMap.Data[drainingFailureDomainKey] - osdDump, err := cephclient.GetOSDDump(r.context.ClusterdContext, clusterInfo) - if err != nil { - return errors.Wrapf(err, "failed to get osddump for reconciling maintenance noout in namespace %s", clusterInfo.Namespace) - } - for _, failureDomainName := range allFailureDomains { - drainingFailureDomainTimeStampKey := fmt.Sprintf("%s-noout-last-set-at", failureDomainName) - if drainingFailureDomain == failureDomainName { - - // get the time stamp - nooutSetTimeString, ok := pdbStateMap.Data[drainingFailureDomainTimeStampKey] - if !ok || len(nooutSetTimeString) == 0 { - // initialize it if it's not set - pdbStateMap.Data[drainingFailureDomainTimeStampKey] = time.Now().Format(time.RFC3339) - } - // parse the timestamp - nooutSetTime, err := time.Parse(time.RFC3339, pdbStateMap.Data[drainingFailureDomainTimeStampKey]) - if err != nil { - return errors.Wrapf(err, "failed to parse timestamp %s for failureDomain %s", pdbStateMap.Data[drainingFailureDomainTimeStampKey], nooutSetTime) - } - if time.Since(nooutSetTime) >= r.maintenanceTimeout { - // noout expired - if _, err := osdDump.UpdateFlagOnCrushUnit(r.context.ClusterdContext, clusterInfo, false, failureDomainName, nooutFlag); err != nil { - return errors.Wrapf(err, "failed to update flag on crush unit when noout expired.") - } - } else { - // set noout - if _, err := osdDump.UpdateFlagOnCrushUnit(r.context.ClusterdContext, clusterInfo, true, failureDomainName, nooutFlag); err != nil { - return errors.Wrapf(err, "failed to update flag on crush unit while setting noout.") - } - } - - } else { - // ensure noout unset - if _, err := osdDump.UpdateFlagOnCrushUnit(r.context.ClusterdContext, clusterInfo, false, failureDomainName, nooutFlag); err != nil { - return errors.Wrapf(err, "failed to update flag on crush unit when ensuring noout is unset.") - } - // delete the timestamp - delete(pdbStateMap.Data, drainingFailureDomainTimeStampKey) - } - } - return nil -} - -func (r *ReconcileClusterDisruption) getOSDFailureDomains(clusterInfo *cephclient.ClusterInfo, request reconcile.Request, poolFailureDomain string) ([]string, []string, []string, error) { - osdDeploymentList := &appsv1.DeploymentList{} - namespaceListOpts := client.InNamespace(request.Namespace) - topologyLocationLabel := fmt.Sprintf(osd.TopologyLocationLabel, poolFailureDomain) - err := r.client.List(context.TODO(), osdDeploymentList, client.MatchingLabels{k8sutil.AppAttr: osd.AppName}, namespaceListOpts) - if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to list osd deployments") - } - - allFailureDomains := sets.NewString() - nodeDrainFailureDomains := sets.NewString() - osdDownFailureDomains := sets.NewString() - - for _, deployment := range osdDeploymentList.Items { - labels := deployment.Spec.Template.ObjectMeta.GetLabels() - failureDomainName := labels[topologyLocationLabel] - if failureDomainName == "" { - return nil, nil, nil, errors.Errorf("failed to get the topology location label %q in OSD deployment %q", - topologyLocationLabel, deployment.Name) - } - - // Assume node drain if osd deployment ReadyReplicas count is 0 and OSD pod is not scheduled on a 
node - if deployment.Status.ReadyReplicas < 1 { - if !osdDownFailureDomains.Has(failureDomainName) { - osdDownFailureDomains.Insert(failureDomainName) - } - isDrained, err := hasOSDNodeDrained(r.client, request.Namespace, labels[osd.OsdIdLabelKey]) - if err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to check if osd %q node is drained", deployment.Name) - } - if isDrained { - logger.Infof("osd %q is down and a possible node drain is detected", deployment.Name) - if !nodeDrainFailureDomains.Has(failureDomainName) { - nodeDrainFailureDomains.Insert(failureDomainName) - } - } else { - logger.Infof("osd %q is down but no node drain is detected", deployment.Name) - } - } - - if !allFailureDomains.Has(failureDomainName) { - allFailureDomains.Insert(failureDomainName) - } - } - return allFailureDomains.List(), nodeDrainFailureDomains.List(), osdDownFailureDomains.List(), nil -} - -func (r *ReconcileClusterDisruption) hasPGHealthCheckTimedout(pdbStateMap *corev1.ConfigMap) bool { - if r.pgHealthCheckTimeout == 0 { - logger.Debug("pg health check timeout is not set in the cluster. waiting for PGs to get active+clean") - return false - } - - timeString, ok := pdbStateMap.Data[pgHealthCheckDurationKey] - if !ok || len(timeString) == 0 { - pdbStateMap.Data[pgHealthCheckDurationKey] = time.Now().Format(time.RFC3339) - } else { - pgHealthCheckDuration, err := time.Parse(time.RFC3339, timeString) - if err != nil { - logger.Errorf("failed to parse timestamp %v. %v", pgHealthCheckDuration, err) - pdbStateMap.Data[pgHealthCheckDurationKey] = time.Now().Format(time.RFC3339) - return false - } - timeElapsed := time.Since(pgHealthCheckDuration) - if timeElapsed >= r.pgHealthCheckTimeout { - logger.Info("timed out waiting for the PGs to become active+clean") - return true - } - timeleft := r.pgHealthCheckTimeout - timeElapsed - logger.Infof("waiting for %d minute(s) for PGs to become active+clean", int(timeleft.Minutes())) - } - return false -} - -// hasNodeDrained returns true if OSD pod is not assigned to any node or if the OSD node is not schedulable -func hasOSDNodeDrained(c client.Client, namespace, osdID string) (bool, error) { - osdNodeName, err := getOSDNodeName(c, namespace, osdID) - if err != nil { - return false, errors.Wrapf(err, "failed to get node name assigned to OSD %q POD", osdID) - } - - if osdNodeName == "" { - logger.Debugf("osd %q POD is not assigned to any node. assuming node drain", osdID) - return true, nil - } - - node, err := getNode(c, osdNodeName) - if err != nil { - return false, errors.Wrapf(err, "failed to get node assigned to OSD %q POD", osdID) - } - return node.Spec.Unschedulable, nil -} - -func getOSDNodeName(c client.Client, namespace, osdID string) (string, error) { - pods := &corev1.PodList{} - listOpts := []client.ListOption{ - client.InNamespace(namespace), - client.MatchingLabels{osd.OsdIdLabelKey: osdID}, - } - - err := c.List(context.TODO(), pods, listOpts...) 
- if err != nil { - return "", errors.Wrapf(err, "failed to list pods for osd %q", osdID) - } - - if len(pods.Items) > 0 { - return pods.Items[0].Spec.NodeName, nil - } - return "", nil -} - -func getNode(c client.Client, nodeName string) (*corev1.Node, error) { - node := &corev1.Node{} - err := c.Get(context.TODO(), types.NamespacedName{Name: nodeName}, node) - if err != nil { - return nil, errors.Wrapf(err, "failed to get node %q", nodeName) - } - return node, nil -} - -func getPDBName(failureDomainType, failureDomainName string) string { - return k8sutil.TruncateNodeName(fmt.Sprintf("%s-%s-%s", osdPDBAppName, failureDomainType, "%s"), failureDomainName) -} - -func getLastDrainTimeStamp(pdbStateMap *corev1.ConfigMap, key string) (time.Time, error) { - var err error - var lastDrainTimeStamp time.Time - lastDrainTimeStampString, ok := pdbStateMap.Data[key] - if !ok || len(lastDrainTimeStampString) == 0 { - return time.Now(), nil - } else { - lastDrainTimeStamp, err = time.Parse(time.RFC3339, pdbStateMap.Data[key]) - if err != nil { - return time.Time{}, errors.Wrapf(err, "failed to parse timestamp %q", pdbStateMap.Data[key]) - } - } - - return lastDrainTimeStamp, nil -} - -func resetPDBConfig(pdbStateMap *corev1.ConfigMap) { - pdbStateMap.Data[drainingFailureDomainKey] = "" - delete(pdbStateMap.Data, drainingFailureDomainDurationKey) - delete(pdbStateMap.Data, pgHealthCheckDurationKey) -} diff --git a/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go b/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go deleted file mode 100644 index 7261dde11..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go +++ /dev/null @@ -1,466 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clusterdisruption - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - healthyCephStatus = `{"fsid":"877a47e0-7f6c-435e-891a-76983ab8c509","health":{"checks":{},"status":"HEALTH_OK"},"election_epoch":12,"quorum":[0,1,2],"quorum_names":["a","b","c"],"monmap":{"epoch":3,"fsid":"877a47e0-7f6c-435e-891a-76983ab8c509","modified":"2020-11-02 09:58:23.015313","created":"2020-11-02 09:57:37.719235","min_mon_release":14,"min_mon_release_name":"nautilus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.74.42:3300","nonce":0},{"type":"v1","addr":"172.30.74.42:6789","nonce":0}]},"addr":"172.30.74.42:6789/0","public_addr":"172.30.74.42:6789/0"},{"rank":1,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.101.61:3300","nonce":0},{"type":"v1","addr":"172.30.101.61:6789","nonce":0}]},"addr":"172.30.101.61:6789/0","public_addr":"172.30.101.61:6789/0"},{"rank":2,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.250.55:3300","nonce":0},{"type":"v1","addr":"172.30.250.55:6789","nonce":0}]},"addr":"172.30.250.55:6789/0","public_addr":"172.30.250.55:6789/0"}]},"osdmap":{"osdmap":{"epoch":19,"num_osds":3,"num_up_osds":3,"num_in_osds":3,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":96}],"num_pgs":96,"num_pools":3,"num_objects":79,"data_bytes":81553681,"bytes_used":3255447552,"bytes_avail":1646011994112,"bytes_total":1649267441664,"read_bytes_sec":853,"write_bytes_sec":5118,"read_op_per_sec":1,"write_op_per_sec":0},"fsmap":{"epoch":9,"id":1,"up":1,"in":1,"max":1,"by_rank":[{"filesystem_id":1,"rank":0,"name":"ocs-storagecluster-cephfilesystem-b","status":"up:active","gid":14161},{"filesystem_id":1,"rank":0,"name":"ocs-storagecluster-cephfilesystem-a","status":"up:standby-replay","gid":24146}],"up:standby":0},"mgrmap":{"epoch":10,"active_gid":14122,"active_name":"a","active_addrs":{"addrvec":[{"type":"v2","addr":"10.131.0.28:6800","nonce":1},{"type":"v1","addr":"10.131.0.28:6801","nonce":1}]}}}` - unHealthyCephStatus = `{"fsid":"613975f3-3025-4802-9de1-a2280b950e75","health":{"checks":{"OSD_DOWN":{"severity":"HEALTH_WARN","summary":{"message":"1 osds down"}},"OSD_HOST_DOWN":{"severity":"HEALTH_WARN","summary":{"message":"1 host (1 osds) down"}},"PG_AVAILABILITY":{"severity":"HEALTH_WARN","summary":{"message":"Reduced data availability: 101 pgs stale"}},"POOL_APP_NOT_ENABLED":{"severity":"HEALTH_WARN","summary":{"message":"application not enabled on 1 
pool(s)"}}},"status":"HEALTH_WARN","overall_status":"HEALTH_WARN"},"election_epoch":12,"quorum":[0,1,2],"quorum_names":["rook-ceph-mon0","rook-ceph-mon2","rook-ceph-mon1"],"monmap":{"epoch":3,"fsid":"613975f3-3025-4802-9de1-a2280b950e75","modified":"2017-08-11 20:13:02.075679","created":"2017-08-11 20:12:35.314510","features":{"persistent":["kraken","luminous"],"optional":[]},"mons":[{"rank":0,"name":"rook-ceph-mon0","addr":"10.3.0.45:6789/0","public_addr":"10.3.0.45:6789/0"},{"rank":1,"name":"rook-ceph-mon2","addr":"10.3.0.249:6789/0","public_addr":"10.3.0.249:6789/0"},{"rank":2,"name":"rook-ceph-mon1","addr":"10.3.0.252:6789/0","public_addr":"10.3.0.252:6789/0"}]},"osdmap":{"osdmap":{"epoch":17,"num_osds":2,"num_up_osds":1,"num_in_osds":2,"full":false,"nearfull":true,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"stale+active+clean","count":101},{"state_name":"active+clean","count":99}],"num_pgs":200,"num_pools":2,"num_objects":243,"data_bytes":976793635,"bytes_used":13611479040,"bytes_avail":19825307648,"bytes_total":33436786688},"fsmap":{"epoch":1,"by_rank":[]},"mgrmap":{"epoch":3,"active_gid":14111,"active_name":"rook-ceph-mgr0","active_addr":"10.2.73.6:6800/9","available":true,"standbys":[],"modules":["restful","status"],"available_modules":["dashboard","prometheus","restful","status","zabbix"]},"servicemap":{"epoch":1,"modified":"0.000000","services":{}}}` -) - -var nodeName = "node01" -var namespace = "rook-ceph" - -var cephCluster = &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "ceph-cluster"}, -} - -var nodeObj = &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: nodeName}, - Spec: corev1.NodeSpec{ - Unschedulable: false, - }, -} - -var unschedulableNodeObj = &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: nodeName}, - Spec: corev1.NodeSpec{ - Unschedulable: true, - }, -} - -func fakeOSDDeployment(id, readyReplicas int) appsv1.Deployment { - osd := appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("osd-%d", id), - Namespace: namespace, - Labels: map[string]string{ - "app": "rook-ceph-osd", - }, - }, - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "topology-location-zone": fmt.Sprintf("zone-%d", id), - "ceph-osd-id": fmt.Sprintf("%d", id), - }, - }, - }, - }, - Status: appsv1.DeploymentStatus{ - ReadyReplicas: int32(readyReplicas), - }, - } - return osd -} - -func fakeOSDPod(id int, nodeName string) corev1.Pod { - osdPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("osd-%d", id), - Namespace: namespace, - Labels: map[string]string{ - "app": "rook-ceph-osd", - "ceph-osd-id": fmt.Sprintf("%d", id), - }, - }, - Spec: corev1.PodSpec{ - NodeName: nodeName, - }, - } - return osdPod -} - -func fakePDBConfigMap(drainingFailureDomain string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: pdbStateMapName, Namespace: namespace}, - Data: map[string]string{drainingFailureDomainKey: drainingFailureDomain, setNoOut: ""}, - } -} - -func getFakeReconciler(t *testing.T, obj ...runtime.Object) *ReconcileClusterDisruption { - scheme := scheme.Scheme - err := policyv1.AddToScheme(scheme) - assert.NoError(t, err) - err = policyv1beta1.AddToScheme(scheme) - assert.NoError(t, err) - - err = appsv1.AddToScheme(scheme) - assert.NoError(t, err) - err = corev1.AddToScheme(scheme) - assert.NoError(t, err) - client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(obj...).Build() - - return 
&ReconcileClusterDisruption{ - client: client, - scheme: scheme, - clusterMap: &ClusterMap{clusterMap: map[string]*cephv1.CephCluster{namespace: cephCluster}}, - } -} - -func getFakeClusterInfo() *client.ClusterInfo { - sharedClusterMap := &ClusterMap{} - sharedClusterMap.UpdateClusterMap(namespace, cephCluster) - return sharedClusterMap.GetClusterInfo(namespace) - -} - -func TestGetOSDFailureDomains(t *testing.T) { - testcases := []struct { - name string - osds []appsv1.Deployment - osdPods []corev1.Pod - node *corev1.Node - expectedAllFailureDomains []string - expectedDrainingFailureDomains []string - expectedOsdDownFailureDomains []string - }{ - { - name: "case 1: all osds are running", - osds: []appsv1.Deployment{fakeOSDDeployment(1, 1), fakeOSDDeployment(2, 1), - fakeOSDDeployment(3, 1)}, - osdPods: []corev1.Pod{fakeOSDPod(1, nodeName), fakeOSDPod(2, nodeName), - fakeOSDPod(3, nodeName)}, - node: nodeObj, - expectedAllFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - expectedOsdDownFailureDomains: []string{}, - expectedDrainingFailureDomains: []string{}, - }, - { - name: "case 2: osd in zone-1 is pending and node is unscheduable", - osds: []appsv1.Deployment{fakeOSDDeployment(1, 0), fakeOSDDeployment(2, 1), - fakeOSDDeployment(3, 1)}, - osdPods: []corev1.Pod{fakeOSDPod(1, ""), fakeOSDPod(2, nodeName), - fakeOSDPod(3, nodeName)}, - node: nodeObj, - expectedAllFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - expectedOsdDownFailureDomains: []string{"zone-1"}, - expectedDrainingFailureDomains: []string{"zone-1"}, - }, - { - name: "case 3: osd in zone-1 and zone-2 are pending and node is unscheduable", - osds: []appsv1.Deployment{fakeOSDDeployment(1, 0), fakeOSDDeployment(2, 0), - fakeOSDDeployment(3, 1)}, - osdPods: []corev1.Pod{fakeOSDPod(1, ""), fakeOSDPod(2, ""), - fakeOSDPod(3, nodeName)}, - node: nodeObj, - expectedAllFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - expectedOsdDownFailureDomains: []string{"zone-1", "zone-2"}, - expectedDrainingFailureDomains: []string{"zone-1", "zone-2"}, - }, - { - name: "case 4: osd in zone-1 is pending but osd node is scheduable", - osds: []appsv1.Deployment{fakeOSDDeployment(1, 0), fakeOSDDeployment(2, 1), - fakeOSDDeployment(3, 1)}, - osdPods: []corev1.Pod{fakeOSDPod(1, nodeName), fakeOSDPod(2, nodeName), - fakeOSDPod(3, nodeName)}, - node: nodeObj, - expectedAllFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - expectedOsdDownFailureDomains: []string{"zone-1"}, - expectedDrainingFailureDomains: []string{}, - }, - { - name: "case 5: osd in zone-1 is pending but osd node is not scheduable", - osds: []appsv1.Deployment{fakeOSDDeployment(1, 0), fakeOSDDeployment(2, 1), - fakeOSDDeployment(3, 1)}, - osdPods: []corev1.Pod{fakeOSDPod(1, nodeName), fakeOSDPod(2, nodeName), - fakeOSDPod(3, nodeName)}, - node: unschedulableNodeObj, - expectedAllFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - expectedOsdDownFailureDomains: []string{"zone-1"}, - expectedDrainingFailureDomains: []string{"zone-1"}, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - objs := []runtime.Object{ - cephCluster, - &corev1.ConfigMap{}, - tc.node, - } - for _, osdDeployment := range tc.osds { - objs = append(objs, osdDeployment.DeepCopy()) - } - for _, osdPod := range tc.osdPods { - objs = append(objs, osdPod.DeepCopy()) - } - r := getFakeReconciler(t, objs...) 
- clusterInfo := getFakeClusterInfo() - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace}} - allfailureDomains, nodeDrainFailureDomains, osdDownFailureDomains, err := r.getOSDFailureDomains(clusterInfo, request, "zone") - assert.NoError(t, err) - assert.Equal(t, tc.expectedAllFailureDomains, allfailureDomains) - assert.Equal(t, tc.expectedDrainingFailureDomains, nodeDrainFailureDomains) - assert.Equal(t, tc.expectedOsdDownFailureDomains, osdDownFailureDomains) - }) - } -} - -func TestGetOSDFailureDomainsError(t *testing.T) { - testcases := []struct { - name string - osds []appsv1.Deployment - expectedAllFailureDomains []string - expectedDrainingFailureDomains []string - expectedOsdDownFailureDomains []string - }{ - { - name: "case 1: one or more OSD deployment is missing crush location label", - osds: []appsv1.Deployment{fakeOSDDeployment(1, 1), fakeOSDDeployment(2, 1), - fakeOSDDeployment(3, 1)}, - expectedAllFailureDomains: nil, - expectedDrainingFailureDomains: nil, - expectedOsdDownFailureDomains: nil, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - osd := tc.osds[0].DeepCopy() - osd.Spec.Template.ObjectMeta.Labels["topology-location-zone"] = "" - r := getFakeReconciler(t, cephCluster, &corev1.ConfigMap{}, - tc.osds[1].DeepCopy(), tc.osds[2].DeepCopy(), osd) - clusterInfo := getFakeClusterInfo() - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace}} - allfailureDomains, nodeDrainFailureDomains, osdDownFailureDomains, err := r.getOSDFailureDomains(clusterInfo, request, "zone") - assert.Error(t, err) - assert.Equal(t, tc.expectedAllFailureDomains, allfailureDomains) - assert.Equal(t, tc.expectedDrainingFailureDomains, nodeDrainFailureDomains) - assert.Equal(t, tc.expectedOsdDownFailureDomains, osdDownFailureDomains) - }) - } -} - -func TestReconcilePDBForOSD(t *testing.T) { - testcases := []struct { - name string - fakeCephStatus string - fakeOSDDump string - configMap *corev1.ConfigMap - allFailureDomains []string - osdDownFailureDomains []string - activeNodeDrains bool - expectedSetNoOutValue string - expectedOSDPDBCount int - expectedMaxUnavailableCount int - expectedDrainingFailureDomainName string - }{ - { - name: "case 1: no draining failure domain and all pgs are healthy", - fakeCephStatus: healthyCephStatus, - fakeOSDDump: `{"OSDs": [{"OSD": 3, "Up": 3, "In": 3}]}`, - allFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - osdDownFailureDomains: []string{}, - configMap: fakePDBConfigMap(""), - activeNodeDrains: false, - expectedSetNoOutValue: "", - expectedOSDPDBCount: 1, - expectedMaxUnavailableCount: 1, - expectedDrainingFailureDomainName: "", - }, - { - name: "case 2: zone-1 failure domain is draining and pgs are unhealthy", - fakeCephStatus: unHealthyCephStatus, - fakeOSDDump: `{"OSDs": [{"OSD": 3, "Up": 3, "In": 2}]}`, - allFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - osdDownFailureDomains: []string{"zone-1"}, - configMap: fakePDBConfigMap(""), - activeNodeDrains: true, - expectedSetNoOutValue: "true", - expectedOSDPDBCount: 2, - expectedMaxUnavailableCount: 0, - expectedDrainingFailureDomainName: "zone-1", - }, - { - name: "case 3: zone-1 is back online. 
But pgs are still unhealthy from zone-1 drain", - fakeCephStatus: unHealthyCephStatus, - fakeOSDDump: `{"OSDs": [{"OSD": 3, "Up": 3, "In": 3}]}`, - allFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - osdDownFailureDomains: []string{}, - configMap: fakePDBConfigMap("zone-1"), - activeNodeDrains: true, - expectedSetNoOutValue: "", - expectedOSDPDBCount: 2, - expectedMaxUnavailableCount: 0, - expectedDrainingFailureDomainName: "zone-1", - }, - { - name: "case 4: zone-1 is back online and pgs are also healthy", - fakeCephStatus: healthyCephStatus, - fakeOSDDump: `{"OSDs": [{"OSD": 3, "Up": 3, "In": 3}]}`, - allFailureDomains: []string{"zone-1", "zone-2", "zone-3"}, - osdDownFailureDomains: []string{}, - configMap: fakePDBConfigMap(""), - activeNodeDrains: true, - expectedSetNoOutValue: "", - expectedOSDPDBCount: 1, - expectedMaxUnavailableCount: 1, - expectedDrainingFailureDomainName: "", - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - r := getFakeReconciler(t, cephCluster, tc.configMap) - clusterInfo := getFakeClusterInfo() - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace}} - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "status" { - return tc.fakeCephStatus, nil - } - if args[0] == "osd" && args[1] == "dump" { - return tc.fakeOSDDump, nil - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - clientset := test.New(t, 3) - - // check for PDBV1 version - test.SetFakeKubernetesVersion(clientset, "v1.21.0") - r.context = &controllerconfig.Context{ClusterdContext: &clusterd.Context{Executor: executor, Clientset: clientset}} - _, err := r.reconcilePDBsForOSDs(clusterInfo, request, tc.configMap, "zone", tc.allFailureDomains, tc.osdDownFailureDomains, tc.activeNodeDrains) - assert.NoError(t, err) - - // assert that pdb for osd are created correctly - existingPDBsV1 := &policyv1.PodDisruptionBudgetList{} - err = r.client.List(context.TODO(), existingPDBsV1) - assert.NoError(t, err) - assert.Equal(t, tc.expectedOSDPDBCount, len(existingPDBsV1.Items)) - for _, pdb := range existingPDBsV1.Items { - assert.Equal(t, tc.expectedMaxUnavailableCount, pdb.Spec.MaxUnavailable.IntValue()) - } - // check for PDBV1Beta1 version - test.SetFakeKubernetesVersion(clientset, "v1.20.0") - r.context = &controllerconfig.Context{ClusterdContext: &clusterd.Context{Executor: executor, Clientset: clientset}} - _, err = r.reconcilePDBsForOSDs(clusterInfo, request, tc.configMap, "zone", tc.allFailureDomains, tc.osdDownFailureDomains, tc.activeNodeDrains) - assert.NoError(t, err) - existingPDBsV1Beta1 := &policyv1beta1.PodDisruptionBudgetList{} - err = r.client.List(context.TODO(), existingPDBsV1Beta1) - assert.NoError(t, err) - assert.Equal(t, tc.expectedOSDPDBCount, len(existingPDBsV1Beta1.Items)) - for _, pdb := range existingPDBsV1Beta1.Items { - assert.Equal(t, tc.expectedMaxUnavailableCount, pdb.Spec.MaxUnavailable.IntValue()) - } - - // assert that config map is updated with correct failure domain - existingConfigMaps := &corev1.ConfigMapList{} - err = r.client.List(context.TODO(), existingConfigMaps) - assert.NoError(t, err) - assert.Equal(t, tc.expectedDrainingFailureDomainName, existingConfigMaps.Items[0].Data[drainingFailureDomainKey]) - assert.Equal(t, tc.expectedSetNoOutValue, existingConfigMaps.Items[0].Data[setNoOut]) - }) - - } -} - -func 
TestPGHealthcheckTimeout(t *testing.T) { - pdbConfig := fakePDBConfigMap("") - r := getFakeReconciler(t, cephCluster, pdbConfig) - clusterInfo := getFakeClusterInfo() - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace}} - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "status" { - return unHealthyCephStatus, nil - } - if args[0] == "osd" && args[1] == "dump" { - return `{"OSDs": [{"OSD": 3, "Up": 3, "In": 3}]}`, nil - } - return "", errors.Errorf("unexpected ceph command '%v'", args) - } - clientset := test.New(t, 3) - r.context = &controllerconfig.Context{ClusterdContext: &clusterd.Context{Executor: executor, Clientset: clientset}} - // set PG health check timeout to 10 minutes - r.pgHealthCheckTimeout = time.Duration(time.Minute * 10) - - // reconcile OSD PDB with active drains (on zone-1) and unhealthy PGs - _, err := r.reconcilePDBsForOSDs(clusterInfo, request, pdbConfig, "zone", []string{"zone-1", "zone-2"}, []string{"zone-1"}, true) - assert.NoError(t, err) - assert.Equal(t, "zone-1", pdbConfig.Data[drainingFailureDomainKey]) - assert.Equal(t, "true", pdbConfig.Data[setNoOut]) - - // update the pgHealthCheckDuration time by -9 minutes - pdbConfig.Data[pgHealthCheckDurationKey] = time.Now().Add(time.Duration(-7) * time.Minute).Format(time.RFC3339) - // reconcile OSD PDB with no active drains and unhealthy PGs - _, err = r.reconcilePDBsForOSDs(clusterInfo, request, pdbConfig, "zone", []string{"zone-1", "zone-2"}, []string{}, true) - assert.NoError(t, err) - // assert that pdb config map was not reset as the PG health check was not timed out - assert.Equal(t, "zone-1", pdbConfig.Data[drainingFailureDomainKey]) - assert.Equal(t, "true", pdbConfig.Data[setNoOut]) - - // update the drainingFailureDomain time by -9 minutes - pdbConfig.Data[pgHealthCheckDurationKey] = time.Now().Add(time.Duration(-11) * time.Minute).Format(time.RFC3339) - // reconcile OSD PDB with no active drains and unhealthy PGs - _, err = r.reconcilePDBsForOSDs(clusterInfo, request, pdbConfig, "zone", []string{"zone-1", "zone-2"}, []string{}, false) - assert.NoError(t, err) - // assert that pdb config map was reset as the PG health check was timed out - assert.Equal(t, "", pdbConfig.Data[drainingFailureDomainKey]) - assert.Equal(t, "", pdbConfig.Data[setNoOut]) -} - -func TestHasNodeDrained(t *testing.T) { - osdPOD := fakeOSDPod(0, nodeName) - // Not expecting node drain because OSD pod is assigned to a schedulable node - r := getFakeReconciler(t, nodeObj, osdPOD.DeepCopy(), &corev1.ConfigMap{}) - expected, err := hasOSDNodeDrained(r.client, namespace, "0") - assert.NoError(t, err) - assert.False(t, expected) - - // Expecting node drain because OSD pod is assigned to an unschedulable node - r = getFakeReconciler(t, unschedulableNodeObj, osdPOD.DeepCopy(), &corev1.ConfigMap{}) - expected, err = hasOSDNodeDrained(r.client, namespace, "0") - assert.NoError(t, err) - assert.True(t, expected) - - // Expecting node drain because OSD pod is not assigned to any node - osdPodObj := osdPOD.DeepCopy() - osdPodObj.Spec.NodeName = "" - r = getFakeReconciler(t, nodeObj, osdPodObj, &corev1.ConfigMap{}) - expected, err = hasOSDNodeDrained(r.client, namespace, "0") - assert.NoError(t, err) - assert.True(t, expected) -} diff --git a/pkg/operator/ceph/disruption/clusterdisruption/pools.go b/pkg/operator/ceph/disruption/clusterdisruption/pools.go deleted 
file mode 100644 index 41f1d6988..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/pools.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterdisruption - -import ( - "context" - "fmt" - - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/k8sutil" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func (r *ReconcileClusterDisruption) processPools(request reconcile.Request) (*cephv1.CephObjectStoreList, *cephv1.CephFilesystemList, string, int, error) { - namespaceListOpt := client.InNamespace(request.Namespace) - poolSpecs := make([]cephv1.PoolSpec, 0) - poolCount := 0 - cephBlockPoolList := &cephv1.CephBlockPoolList{} - err := r.client.List(context.TODO(), cephBlockPoolList, namespaceListOpt) - if err != nil { - return nil, nil, "", poolCount, errors.Wrapf(err, "could not list the CephBlockpools %v", request.NamespacedName) - } - poolCount += len(cephBlockPoolList.Items) - for _, cephBlockPool := range cephBlockPoolList.Items { - poolSpecs = append(poolSpecs, cephBlockPool.Spec) - } - - cephFilesystemList := &cephv1.CephFilesystemList{} - err = r.client.List(context.TODO(), cephFilesystemList, namespaceListOpt) - if err != nil { - return nil, nil, "", poolCount, errors.Wrapf(err, "could not list the CephFilesystems %v", request.NamespacedName) - } - poolCount += len(cephFilesystemList.Items) - for _, cephFilesystem := range cephFilesystemList.Items { - poolSpecs = append(poolSpecs, cephFilesystem.Spec.MetadataPool) - poolSpecs = append(poolSpecs, cephFilesystem.Spec.DataPools...) 
- - } - - cephObjectStoreList := &cephv1.CephObjectStoreList{} - err = r.client.List(context.TODO(), cephObjectStoreList, namespaceListOpt) - if err != nil { - return nil, nil, "", poolCount, errors.Wrapf(err, "could not list the CephObjectStores %v", request.NamespacedName) - } - poolCount += len(cephObjectStoreList.Items) - for _, cephObjectStore := range cephObjectStoreList.Items { - poolSpecs = append(poolSpecs, cephObjectStore.Spec.MetadataPool) - poolSpecs = append(poolSpecs, cephObjectStore.Spec.DataPool) - - } - minFailureDomain := getMinimumFailureDomain(poolSpecs) - - return cephObjectStoreList, cephFilesystemList, minFailureDomain, poolCount, nil - -} - -func getMinimumFailureDomain(poolList []cephv1.PoolSpec) string { - if len(poolList) == 0 { - return cephv1.DefaultFailureDomain - } - - //start with max as the min - minfailureDomainIndex := len(osd.CRUSHMapLevelsOrdered) - 1 - matched := false - - for _, pool := range poolList { - for index, failureDomain := range osd.CRUSHMapLevelsOrdered { - if index == minfailureDomainIndex { - // index is higher-than/equal-to the min - break - } - if pool.FailureDomain == failureDomain { - // new min found - matched = true - minfailureDomainIndex = index - } - } - } - if !matched { - logger.Debugf("could not match failure domain. defaulting to %q", cephv1.DefaultFailureDomain) - return cephv1.DefaultFailureDomain - } - return osd.CRUSHMapLevelsOrdered[minfailureDomainIndex] -} - -// Setting naive minAvailable for RGW at: n - 1 -func (r *ReconcileClusterDisruption) reconcileCephObjectStore(cephObjectStoreList *cephv1.CephObjectStoreList) error { - for _, objectStore := range cephObjectStoreList.Items { - storeName := objectStore.ObjectMeta.Name - namespace := objectStore.ObjectMeta.Namespace - pdbName := fmt.Sprintf("rook-ceph-rgw-%s", storeName) - labelSelector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"rgw": storeName}, - } - - rgwCount := objectStore.Spec.Gateway.Instances - minAvailable := &intstr.IntOrString{IntVal: rgwCount - 1} - if minAvailable.IntVal < 1 { - continue - } - blockOwnerDeletion := false - objectMeta := metav1.ObjectMeta{ - Name: pdbName, - Namespace: namespace, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: objectStore.APIVersion, - Kind: objectStore.Kind, - Name: objectStore.ObjectMeta.Name, - UID: objectStore.UID, - BlockOwnerDeletion: &blockOwnerDeletion, - }, - }, - } - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - if usePDBV1Beta1 { - pdb := &policyv1beta1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ - Selector: labelSelector, - MinAvailable: minAvailable, - }, - } - request := types.NamespacedName{Name: pdbName, Namespace: namespace} - err = r.reconcileStaticPDB(request, pdb) - if err != nil { - return errors.Wrapf(err, "failed to reconcile cephobjectstore pdb %v", request) - } - continue - } - pdb := &policyv1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - Spec: policyv1.PodDisruptionBudgetSpec{ - Selector: labelSelector, - MinAvailable: minAvailable, - }, - } - request := types.NamespacedName{Name: pdbName, Namespace: namespace} - err = r.reconcileStaticPDB(request, pdb) - if err != nil { - return errors.Wrapf(err, "failed to reconcile cephobjectstore pdb %v", request) - } - } - return nil -} - -// Setting naive minAvailable for MDS at: n -1 -// getting n from the 
cephfilesystem.spec.metadataserver.activecount -func (r *ReconcileClusterDisruption) reconcileCephFilesystem(cephFilesystemList *cephv1.CephFilesystemList) error { - for _, filesystem := range cephFilesystemList.Items { - fsName := filesystem.ObjectMeta.Name - namespace := filesystem.ObjectMeta.Namespace - pdbName := fmt.Sprintf("rook-ceph-mds-%s", fsName) - labelSelector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"rook_file_system": fsName}, - } - - activeCount := filesystem.Spec.MetadataServer.ActiveCount - minAvailable := &intstr.IntOrString{IntVal: activeCount - 1} - if filesystem.Spec.MetadataServer.ActiveStandby { - minAvailable.IntVal++ - } - if minAvailable.IntVal < 1 { - continue - } - blockOwnerDeletion := false - objectMeta := metav1.ObjectMeta{ - Name: pdbName, - Namespace: namespace, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: filesystem.APIVersion, - Kind: filesystem.Kind, - Name: filesystem.ObjectMeta.Name, - UID: filesystem.UID, - BlockOwnerDeletion: &blockOwnerDeletion, - }, - }, - } - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - if usePDBV1Beta1 { - pdb := &policyv1beta1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ - Selector: labelSelector, - MinAvailable: minAvailable, - }, - } - request := types.NamespacedName{Name: pdbName, Namespace: namespace} - err := r.reconcileStaticPDB(request, pdb) - if err != nil { - return errors.Wrapf(err, "failed to reconcile cephfs pdb %v", request) - } - continue - } - pdb := &policyv1.PodDisruptionBudget{ - ObjectMeta: objectMeta, - Spec: policyv1.PodDisruptionBudgetSpec{ - Selector: labelSelector, - MinAvailable: minAvailable, - }, - } - request := types.NamespacedName{Name: pdbName, Namespace: namespace} - err = r.reconcileStaticPDB(request, pdb) - if err != nil { - return errors.Wrapf(err, "failed to reconcile cephfs pdb %v", request) - } - } - return nil -} diff --git a/pkg/operator/ceph/disruption/clusterdisruption/pools_test.go b/pkg/operator/ceph/disruption/clusterdisruption/pools_test.go deleted file mode 100644 index be524563d..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/pools_test.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clusterdisruption - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" -) - -func TestGetMinimumFailureDomain(t *testing.T) { - poolList := []cephv1.PoolSpec{ - {FailureDomain: "region"}, - {FailureDomain: "zone"}, - } - - assert.Equal(t, "zone", getMinimumFailureDomain(poolList)) - - poolList = []cephv1.PoolSpec{ - {FailureDomain: "region"}, - {FailureDomain: "zone"}, - {FailureDomain: "host"}, - } - - assert.Equal(t, "host", getMinimumFailureDomain(poolList)) - - // test default - poolList = []cephv1.PoolSpec{ - {FailureDomain: "aaa"}, - {FailureDomain: "bbb"}, - {FailureDomain: "ccc"}, - } - - assert.Equal(t, "host", getMinimumFailureDomain(poolList)) - -} diff --git a/pkg/operator/ceph/disruption/clusterdisruption/reconcile.go b/pkg/operator/ceph/disruption/clusterdisruption/reconcile.go deleted file mode 100644 index ba4a45542..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/reconcile.go +++ /dev/null @@ -1,281 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clusterdisruption - -import ( - "context" - "sync" - "time" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - - "github.com/coreos/pkg/capnslog" - cephClient "github.com/rook/rook/pkg/daemon/ceph/client" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - "github.com/rook/rook/pkg/operator/k8sutil" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" -) - -const ( - controllerName = "clusterdisruption-controller" - // pdbStateMapName for the clusterdisruption pdb state map - pdbStateMapName = "rook-ceph-pdbstatemap" - legacyOSDPDBLabel = "rook-ceph-osd-pdb" - legacyDrainCanaryLabel = "rook-ceph-drain-canary" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - - // Implement reconcile.Reconciler so the controller can reconcile objects - _ reconcile.Reconciler = &ReconcileClusterDisruption{} - - // delete legacy drain canary pods and blocking OSD podDisruptionBudgets - deleteLegacyResources = true -) - -// ReconcileClusterDisruption reconciles ReplicaSets -type ReconcileClusterDisruption struct { - // client can be used to retrieve objects from the APIServer. - scheme *runtime.Scheme - client client.Client - context *controllerconfig.Context - clusterMap *ClusterMap - maintenanceTimeout time.Duration - pgHealthCheckTimeout time.Duration -} - -// Reconcile reconciles a node and ensures that it has a drain-detection deployment -// attached to it. -// The Controller will requeue the Request to be processed again if an error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
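The doc comment above spells out the controller-runtime requeue contract that this reconciler relies on. A minimal, self-contained sketch of that contract, using the same reconcile package as the deleted code; the reconciler type here is invented for illustration and is not part of the file being removed:

package example

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// requeueSketch is a hypothetical reconciler used only to show how a request is retried.
type requeueSketch struct{}

var _ reconcile.Reconciler = &requeueSketch{}

func (r *requeueSketch) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// Returning a non-nil error requeues the request with backoff.
	// Setting Requeue/RequeueAfter schedules a retry at a fixed delay, as the
	// reconcile below does when the cluster name is not yet known.
	return reconcile.Result{Requeue: true, RequeueAfter: 5 * time.Second}, nil
}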
-func (r *ReconcileClusterDisruption) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // wrapping reconcile because the rook logging mechanism is not compatible with the controller-runtime logging interface - result, err := r.reconcile(request) - if err != nil { - logger.Error(err) - } - return result, err -} - -func (r *ReconcileClusterDisruption) reconcile(request reconcile.Request) (reconcile.Result, error) { - if request.Namespace == "" { - return reconcile.Result{}, errors.Errorf("request did not have namespace: %q", request.NamespacedName) - } - - logger.Debugf("reconciling %q", request.NamespacedName) - - // get the ceph cluster - cephClusters := &cephv1.CephClusterList{} - if err := r.client.List(context.TODO(), cephClusters, client.InNamespace(request.Namespace)); err != nil { - return reconcile.Result{}, errors.Wrapf(err, "could not get cephclusters in namespace %q", request.Namespace) - } - if len(cephClusters.Items) == 0 { - logger.Errorf("cephcluster %q seems to be deleted, not requeuing until triggered again", request) - return reconcile.Result{Requeue: false}, nil - } - - cephCluster := cephClusters.Items[0] - - // update the clustermap with the cluster's name so that - // events on resources associated with the cluster can trigger reconciliation by namespace - r.clusterMap.UpdateClusterMap(request.Namespace, &cephCluster) - - // get the cluster info - clusterInfo := r.clusterMap.GetClusterInfo(request.Namespace) - if clusterInfo == nil { - logger.Infof("clusterName is not known for namespace %q", request.Namespace) - return reconcile.Result{Requeue: true, RequeueAfter: 5 * time.Second}, errors.New("clusterName for this namespace not yet known") - } - - // ensure that the cluster name is populated - if request.Name == "" { - request.Name = clusterInfo.NamespacedName().Name - } - - if !cephCluster.Spec.DisruptionManagement.ManagePodBudgets { - // feature disabled for this cluster. 
not requeueing - return reconcile.Result{Requeue: false}, nil - } - - if deleteLegacyResources { - // delete any legacy blocking PDBs for osd - err := r.deleteLegacyPDBForOSD(clusterInfo.Namespace) - if err != nil { - return reconcile.Result{}, err - } - logger.Info("deleted all legacy blocking PDBs for osds") - - // delete any legacy node drain canary pods - err = r.deleteDrainCanaryPods(clusterInfo.Namespace) - if err != nil { - return reconcile.Result{}, err - } - logger.Info("deleted all legacy node drain canary pods") - - deleteLegacyResources = false - } - - r.maintenanceTimeout = cephCluster.Spec.DisruptionManagement.OSDMaintenanceTimeout * time.Minute - if r.maintenanceTimeout == 0 { - r.maintenanceTimeout = DefaultMaintenanceTimeout - logger.Debugf("Using default maintenance timeout: %v", r.maintenanceTimeout) - } - - r.pgHealthCheckTimeout = cephCluster.Spec.DisruptionManagement.PGHealthCheckTimeout * time.Minute - - // reconcile the pools and get the failure domain - cephObjectStoreList, cephFilesystemList, poolFailureDomain, poolCount, err := r.processPools(request) - if err != nil { - return reconcile.Result{}, err - } - - // reconcile the pdbs for objectstores - err = r.reconcileCephObjectStore(cephObjectStoreList) - if err != nil { - return reconcile.Result{}, err - } - - // reconcile the pdbs for filesystems - err = r.reconcileCephFilesystem(cephFilesystemList) - if err != nil { - return reconcile.Result{}, err - } - - // no pools, no need to reconcile OSD PDB - if poolCount < 1 { - return reconcile.Result{}, nil - } - - // get a list of all the failure domains, failure domains with failed OSDs and failure domains with drained nodes - allFailureDomains, nodeDrainFailureDomains, osdDownFailureDomains, err := r.getOSDFailureDomains(clusterInfo, request, poolFailureDomain) - if err != nil { - return reconcile.Result{}, err - } - - // get the map that stores currently draining failure domain - pdbStateMap, err := r.initializePDBState(request) - if err != nil { - return reconcile.Result{}, err - } - - activeNodeDrains := len(nodeDrainFailureDomains) > 0 - return r.reconcilePDBsForOSDs(clusterInfo, request, pdbStateMap, poolFailureDomain, allFailureDomains, osdDownFailureDomains, activeNodeDrains) -} - -// ClusterMap maintains the association between namespace and clusername -type ClusterMap struct { - clusterMap map[string]*cephv1.CephCluster - mux sync.Mutex -} - -// UpdateClusterMap to populate the clusterName for the namespace -func (c *ClusterMap) UpdateClusterMap(namespace string, cluster *cephv1.CephCluster) { - defer c.mux.Unlock() - c.mux.Lock() - if len(c.clusterMap) == 0 { - c.clusterMap = make(map[string]*cephv1.CephCluster) - } - c.clusterMap[namespace] = cluster - -} - -// GetClusterInfo looks up the context for the current ceph cluster. -// found is the boolean indicating whether a cluster was populated for that namespace or not. -func (c *ClusterMap) GetClusterInfo(namespace string) *cephClient.ClusterInfo { - defer c.mux.Unlock() - c.mux.Lock() - - if len(c.clusterMap) == 0 { - c.clusterMap = make(map[string]*cephv1.CephCluster) - } - - cluster, ok := c.clusterMap[namespace] - if !ok { - return nil - } - - clusterInfo := cephClient.NewClusterInfo(namespace, cluster.ObjectMeta.GetName()) - clusterInfo.CephCred.Username = cephClient.AdminUsername - return clusterInfo -} - -// GetCluster returns vars cluster, found. 
cluster is the cephcluster associated -// with that namespace and found is the boolean indicating whether a cluster was -// populated for that namespace or not. -func (c *ClusterMap) GetCluster(namespace string) (*cephv1.CephCluster, bool) { - defer c.mux.Unlock() - c.mux.Lock() - - if len(c.clusterMap) == 0 { - c.clusterMap = make(map[string]*cephv1.CephCluster) - } - - cluster, ok := c.clusterMap[namespace] - if !ok { - return nil, false - } - - return cluster, true -} - -// GetClusterNamespaces returns the internal clustermap for iteration purporses -func (c *ClusterMap) GetClusterNamespaces() []string { - defer c.mux.Unlock() - c.mux.Lock() - var namespaces []string - for _, cluster := range c.clusterMap { - namespaces = append(namespaces, cluster.Namespace) - } - return namespaces -} - -func (r *ReconcileClusterDisruption) deleteDrainCanaryPods(namespace string) error { - err := r.client.DeleteAllOf(context.TODO(), &appsv1.Deployment{}, client.InNamespace(namespace), - client.MatchingLabels{k8sutil.AppAttr: legacyDrainCanaryLabel}) - if err != nil && !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to delete all the legacy drain-canary pods with label %q", legacyDrainCanaryLabel) - } - return nil -} - -func (r *ReconcileClusterDisruption) deleteLegacyPDBForOSD(namespace string) error { - var podDisruptionBudget client.Object - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - if usePDBV1Beta1 { - podDisruptionBudget = &policyv1beta1.PodDisruptionBudget{} - } else { - podDisruptionBudget = &policyv1.PodDisruptionBudget{} - } - err = r.client.DeleteAllOf(context.TODO(), podDisruptionBudget, client.InNamespace(namespace), - client.MatchingLabels{k8sutil.AppAttr: legacyOSDPDBLabel}) - if err != nil && !kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to delete legacy OSD PDBs with label %q", legacyOSDPDBLabel) - } - return nil -} diff --git a/pkg/operator/ceph/disruption/clusterdisruption/reconcile_test.go b/pkg/operator/ceph/disruption/clusterdisruption/reconcile_test.go deleted file mode 100644 index 5f1840226..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/reconcile_test.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clusterdisruption - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestClusterMap(t *testing.T) { - - sharedClusterMap := &ClusterMap{} - - clusterInfo := sharedClusterMap.GetClusterInfo("rook-ceph-0") - assert.Nil(t, clusterInfo) - - sharedClusterMap.UpdateClusterMap("rook-ceph-0", &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: "ceph-cluster-0"}}) - sharedClusterMap.UpdateClusterMap("rook-ceph-1", &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: "ceph-cluster-1"}}) - sharedClusterMap.UpdateClusterMap("rook-ceph-2", &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: "ceph-cluster-2"}}) - clusterInfo = sharedClusterMap.GetClusterInfo("rook-ceph-0") - assert.NotNil(t, clusterInfo) - assert.Equal(t, clusterInfo.NamespacedName().Name, "ceph-cluster-0") - assert.Equal(t, clusterInfo.NamespacedName().Namespace, "rook-ceph-0") - assert.Equal(t, clusterInfo.Namespace, "rook-ceph-0") - - clusterInfo = sharedClusterMap.GetClusterInfo("storage-namespace") - assert.Nil(t, clusterInfo) - - namespaces := sharedClusterMap.GetClusterNamespaces() - assert.Equal(t, 3, len(namespaces)) -} diff --git a/pkg/operator/ceph/disruption/clusterdisruption/static_pdb.go b/pkg/operator/ceph/disruption/clusterdisruption/static_pdb.go deleted file mode 100644 index 68642faf8..000000000 --- a/pkg/operator/ceph/disruption/clusterdisruption/static_pdb.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clusterdisruption - -import ( - "context" - - "github.com/pkg/errors" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/rook/rook/pkg/operator/k8sutil" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" -) - -func (r *ReconcileClusterDisruption) createStaticPDB(pdb client.Object) error { - err := r.client.Create(context.TODO(), pdb) - if err != nil { - return errors.Wrapf(err, "failed to create pdb %q", pdb.GetName()) - } - return nil -} - -func (r *ReconcileClusterDisruption) reconcileStaticPDB(request types.NamespacedName, pdb client.Object) error { - var existingPDB client.Object - usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) - if err != nil { - return errors.Wrap(err, "failed to fetch pdb version") - } - if usePDBV1Beta1 { - existingPDB = &policyv1beta1.PodDisruptionBudget{} - } else { - existingPDB = &policyv1.PodDisruptionBudget{} - } - err = r.client.Get(context.TODO(), request, existingPDB) - if err != nil { - if apierrors.IsNotFound(err) { - return r.createStaticPDB(pdb) - } - return errors.Wrapf(err, "failed to get pdb %q", pdb.GetName()) - } - - return nil -} diff --git a/pkg/operator/ceph/disruption/controllerconfig/context.go b/pkg/operator/ceph/disruption/controllerconfig/context.go deleted file mode 100644 index 97df5f7d5..000000000 --- a/pkg/operator/ceph/disruption/controllerconfig/context.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllerconfig - -import ( - "sync" - - "github.com/rook/rook/pkg/clusterd" -) - -// Context passed to the controller when associating it with the manager. -type Context struct { - ClusterdContext *clusterd.Context - RookImage string - OperatorNamespace string - ReconcileCanaries *LockingBool -} - -// LockingBool is a bool coupled with a sync.Mutex -type LockingBool struct { - value bool - mux sync.Mutex -} - -// Get bool -func (b *LockingBool) Get() bool { - b.mux.Lock() - defer b.mux.Unlock() - return b.value -} - -// Update bool -func (b *LockingBool) Update(newValue bool) { - b.mux.Lock() - defer b.mux.Unlock() - b.value = newValue -} diff --git a/pkg/operator/ceph/disruption/controllerconfig/toleration.go b/pkg/operator/ceph/disruption/controllerconfig/toleration.go deleted file mode 100644 index 62be538a8..000000000 --- a/pkg/operator/ceph/disruption/controllerconfig/toleration.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllerconfig - -import ( - "fmt" - "sort" - "strings" - - corev1 "k8s.io/api/core/v1" -) - -// TolerationSet is a set of unique tolerations. -type TolerationSet struct { - tolerations map[string]corev1.Toleration -} - -// Add adds a toleration to the TolerationSet -func (t *TolerationSet) Add(toleration corev1.Toleration) { - key := getKey(toleration) - if len(t.tolerations) == 0 { - t.tolerations = make(map[string]corev1.Toleration) - } - t.tolerations[key] = toleration -} - -func getKey(toleration corev1.Toleration) string { - return fmt.Sprintf("%s-%s-%s-%s", toleration.Key, toleration.Operator, toleration.Effect, toleration.Value) -} - -// ToList returns a list of all tolerations in the set. The order will always be the same for the same set. -func (t *TolerationSet) ToList() []corev1.Toleration { - tolerationList := make([]corev1.Toleration, 0) - for _, toleration := range t.tolerations { - tolerationList = append(tolerationList, toleration) - } - sort.SliceStable(tolerationList, func(i, j int) bool { - a := getKey(tolerationList[i]) - b := getKey(tolerationList[j]) - return strings.Compare(a, b) == -1 - }) - return tolerationList -} diff --git a/pkg/operator/ceph/disruption/controllerconfig/toleration_test.go b/pkg/operator/ceph/disruption/controllerconfig/toleration_test.go deleted file mode 100644 index b9894cdf5..000000000 --- a/pkg/operator/ceph/disruption/controllerconfig/toleration_test.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllerconfig - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - corev1 "k8s.io/api/core/v1" -) - -func TestTolerationSet(t *testing.T) { - uniqueTolerationsManualA := []corev1.Toleration{ - // key1 - // exists - { - Key: "key1", - Operator: corev1.TolerationOpExists, - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key1", - Operator: corev1.TolerationOpExists, - Effect: corev1.TaintEffectPreferNoSchedule, - }, - // equals with different values - { - Key: "key1", - Operator: corev1.TolerationOpEqual, - Value: "value1", - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key1", - Operator: corev1.TolerationOpEqual, - Value: "value2", - Effect: corev1.TaintEffectNoSchedule, - }, - - // with different effects - { - Key: "key1", - Operator: corev1.TolerationOpEqual, - Value: "value2", - Effect: corev1.TaintEffectNoExecute, - }, - // key2 - { - Key: "key2", - Operator: corev1.TolerationOpExists, - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key2", - Operator: corev1.TolerationOpEqual, - Value: "value1", - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key2", - Operator: corev1.TolerationOpEqual, - Value: "value2", - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key2", - Operator: corev1.TolerationOpEqual, - Value: "value2", - Effect: corev1.TaintEffectNoExecute, - }, - } - //identical to uniqueTolerationsManualA - uniqueTolerationsManualB := []corev1.Toleration{ - // key1 - // exists - { - Key: "key1", - Operator: corev1.TolerationOpExists, - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key1", - Operator: corev1.TolerationOpExists, - Effect: corev1.TaintEffectPreferNoSchedule, - }, - // equals with different values - { - Key: "key1", - Operator: corev1.TolerationOpEqual, - Value: "value1", - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key1", - Operator: corev1.TolerationOpEqual, - Value: "value2", - Effect: corev1.TaintEffectNoSchedule, - }, - - // with different effects - { - Key: "key1", - Operator: corev1.TolerationOpEqual, - Value: "value2", - Effect: corev1.TaintEffectNoExecute, - }, - // key2 - { - Key: "key2", - Operator: corev1.TolerationOpExists, - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key2", - Operator: corev1.TolerationOpEqual, - Value: "value1", - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key2", - Operator: corev1.TolerationOpEqual, - Value: "value2", - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key2", - Operator: corev1.TolerationOpEqual, - Value: "value2", - Effect: corev1.TaintEffectNoExecute, - }, - } - - tolerationsWithDuplicates := make([]corev1.Toleration, 0) - for i := range uniqueTolerationsManualA { - tolerationsWithDuplicates = append(tolerationsWithDuplicates, uniqueTolerationsManualA[i]) - - //append the previous one again if it's within range, else append the last one - if i > 0 { - tolerationsWithDuplicates = append(tolerationsWithDuplicates, uniqueTolerationsManualB[i-1]) - } else { - tolerationsWithDuplicates = append(tolerationsWithDuplicates, uniqueTolerationsManualB[len(uniqueTolerationsManualB)-1]) - } - } - uniqueTolerationsMap := &TolerationSet{} - for _, toleration := range tolerationsWithDuplicates { - uniqueTolerationsMap.Add(toleration) - } - - uniqueTolerations := uniqueTolerationsMap.ToList() - - assert.Equal(t, len(uniqueTolerationsManualA), len(uniqueTolerations)) - for _, tolerationI := range uniqueTolerationsManualA { - found := false - for _, tolerationJ := range uniqueTolerations { - if tolerationI == 
tolerationJ { - found = true - } - } - assert.True(t, found) - } -} diff --git a/pkg/operator/ceph/disruption/machinedisruption/add.go b/pkg/operator/ceph/disruption/machinedisruption/add.go deleted file mode 100644 index 9f93b80d5..000000000 --- a/pkg/operator/ceph/disruption/machinedisruption/add.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package machinedisruption - -import ( - healthchecking "github.com/openshift/machine-api-operator/pkg/apis/healthchecking/v1alpha1" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -// Add adds a new Controller to the Manager based on machinedisruption.ReconcileMachineDisruption and registers the relevant watches and handlers. -// Read more about how Managers, Controllers, and their Watches, Handlers, Predicates, etc work here: -// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg -func Add(mgr manager.Manager, ctx *controllerconfig.Context) error { - mgrScheme := mgr.GetScheme() - if err := healthchecking.AddToScheme(mgrScheme); err != nil { - return errors.Wrap(err, "failed to add to healthchecking scheme") - } - if err := cephv1.AddToScheme(mgrScheme); err != nil { - return errors.Wrap(err, "failed to add to ceph scheme") - } - - reconcileMachineDisruption := &MachineDisruptionReconciler{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: ctx, - } - - // TODO CHANGE ME (the context) - reconciler := reconcile.Reconciler(reconcileMachineDisruption) - // create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: reconciler}) - if err != nil { - return err - } - - err = c.Watch(&source.Kind{Type: &cephv1.CephCluster{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return err - } - - return c.Watch(&source.Kind{Type: &healthchecking.MachineDisruptionBudget{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephCluster{}, - }) -} diff --git a/pkg/operator/ceph/disruption/machinedisruption/doc.go b/pkg/operator/ceph/disruption/machinedisruption/doc.go deleted file mode 100644 index e379c2297..000000000 --- a/pkg/operator/ceph/disruption/machinedisruption/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package machinedisruption ensures that openshift fencing doesn't interfere with running ceph resources in a way that results in data loss/unavailability. -The design and purpose for machinedisruption management is found at: -https://github.com/rook/rook/blob/master/design/ceph/ceph-openshift-fencing-mitigation.md -*/ - -package machinedisruption diff --git a/pkg/operator/ceph/disruption/machinedisruption/reconcile.go b/pkg/operator/ceph/disruption/machinedisruption/reconcile.go deleted file mode 100644 index d9ea88157..000000000 --- a/pkg/operator/ceph/disruption/machinedisruption/reconcile.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package machinedisruption - -import ( - "context" - "fmt" - "time" - - "github.com/coreos/pkg/capnslog" - healthchecking "github.com/openshift/machine-api-operator/pkg/apis/healthchecking/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - cephClient "github.com/rook/rook/pkg/daemon/ceph/client" - - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - "github.com/rook/rook/pkg/operator/ceph/disruption/machinelabel" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - controllerName = "machinedisruption-controller" - MDBCephClusterNamespaceLabelKey = "rook.io/cephClusterNamespace" - MDBCephClusterNameLabelKey = "rook.io/cephClusterName" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -// MachineDisruptionReconciler reconciles MachineDisruption -type MachineDisruptionReconciler struct { - scheme *runtime.Scheme - client client.Client - context *controllerconfig.Context -} - -// Reconcile is the implementation of reconcile function for MachineDisruptionReconciler -// which ensures that the machineDisruptionBudget for the rook ceph cluster is in correct state -// The Controller will requeue the Request to be processed again if an error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
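The reconcile that follows toggles the MachineDisruptionBudget's MaxUnavailable between 0 and 1 depending on whether Ceph reports clean PGs. A small hypothetical helper, not taken from the deleted file, that states that policy on its own:

package example

// desiredMaxUnavailable condenses the fencing policy applied to the
// MachineDisruptionBudget: allow one machine to be unavailable while the Ceph
// cluster is clean, and none while it is unhealthy.
func desiredMaxUnavailable(clusterIsClean bool) int32 {
	if clusterIsClean {
		return 1 // fencing may proceed on a single machine
	}
	return 0 // block machine disruption until PGs recover
}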
-func (r *MachineDisruptionReconciler) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // wrapping reconcile because the rook logging mechanism is not compatible with the controller-runtime logging interface - result, err := r.reconcile(request) - if err != nil { - logger.Error(err) - } - return result, err -} - -func (r *MachineDisruptionReconciler) reconcile(request reconcile.Request) (reconcile.Result, error) { - logger.Debugf("reconciling %s", request.NamespacedName) - - // Fetching the cephCluster - cephClusterInstance := &cephv1.CephCluster{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephClusterInstance) - if kerrors.IsNotFound(err) { - logger.Infof("cephCluster instance not found for %s", request.NamespacedName) - return reconcile.Result{}, nil - } else if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "could not fetch cephCluster %s", request.Name) - } - - // skipping the reconcile since the feature is switched off - if !cephClusterInstance.Spec.DisruptionManagement.ManageMachineDisruptionBudgets { - logger.Debugf("Skipping reconcile for cephCluster %s as manageMachineDisruption is turned off", request.NamespacedName) - return reconcile.Result{}, nil - } - - mdb := &healthchecking.MachineDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: generateMDBInstanceName(request.Name, request.Namespace), - Namespace: cephClusterInstance.Spec.DisruptionManagement.MachineDisruptionBudgetNamespace, - }, - } - - err = r.client.Get(context.TODO(), types.NamespacedName{Name: mdb.GetName(), Namespace: mdb.GetNamespace()}, mdb) - if kerrors.IsNotFound(err) { - // If the MDB is not found creating the MDB for the cephCluster - maxUnavailable := int32(0) - // Generating the MDB instance for the cephCluster - newMDB := &healthchecking.MachineDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: generateMDBInstanceName(request.Name, request.Namespace), - Namespace: cephClusterInstance.Spec.DisruptionManagement.MachineDisruptionBudgetNamespace, - Labels: map[string]string{ - MDBCephClusterNamespaceLabelKey: request.Namespace, - MDBCephClusterNameLabelKey: request.Name, - }, - }, - Spec: healthchecking.MachineDisruptionBudgetSpec{ - MaxUnavailable: &maxUnavailable, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - machinelabel.MachineFencingLabelKey: request.Name, - machinelabel.MachineFencingNamespaceLabelKey: request.Namespace, - }, - }, - }, - } - err = controllerutil.SetControllerReference(cephClusterInstance, newMDB, r.scheme) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to set owner reference of mdb %q", newMDB.Name) - } - err = r.client.Create(context.TODO(), newMDB) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to create mdb %s", mdb.GetName()) - } - return reconcile.Result{}, nil - } else if err != nil { - return reconcile.Result{}, err - } - if mdb.Spec.MaxUnavailable == nil { - maxUnavailable := int32(0) - mdb.Spec.MaxUnavailable = &maxUnavailable - } - // Check if the cluster is clean or not - clusterInfo := cephClient.AdminClusterInfo(request.NamespacedName.Namespace) - _, isClean, err := cephClient.IsClusterClean(r.context.ClusterdContext, clusterInfo) - if err != nil { - maxUnavailable := int32(0) - mdb.Spec.MaxUnavailable = &maxUnavailable - err = r.client.Update(context.TODO(), mdb) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to update mdb %s", mdb.GetName()) - } - return reconcile.Result{}, 
errors.Wrapf(err, "failed to get cephCluster %s status", request.NamespacedName) - } - if isClean && *mdb.Spec.MaxUnavailable != 1 { - maxUnavailable := int32(1) - mdb.Spec.MaxUnavailable = &maxUnavailable - err = r.client.Update(context.TODO(), mdb) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to update mdb %s", mdb.GetName()) - } - } else if !isClean && *mdb.Spec.MaxUnavailable != 0 { - maxUnavailable := int32(0) - mdb.Spec.MaxUnavailable = &maxUnavailable - err = r.client.Update(context.TODO(), mdb) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to update mdb %s", mdb.GetName()) - } - } - return reconcile.Result{Requeue: true, RequeueAfter: time.Minute}, nil -} - -func generateMDBInstanceName(name, namespace string) string { - return fmt.Sprintf("%s-%s", name, namespace) -} diff --git a/pkg/operator/ceph/disruption/machinelabel/add.go b/pkg/operator/ceph/disruption/machinelabel/add.go deleted file mode 100644 index 53389b8a1..000000000 --- a/pkg/operator/ceph/disruption/machinelabel/add.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package machinelabel - -import ( - mapiv1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - osdPodLabelKey = "app" - osdPODLabelValue = "rook-ceph-osd" - osdClusterNameLabelKey = "rook_cluster" -) - -// Add adds a new Controller to the Manager based on machinelabel.ReconcileMachineLabel and registers the relevant watches and handlers. 
-// Read more about how Managers, Controllers, and their Watches, Handlers, Predicates, etc work here: -// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg -func Add(mgr manager.Manager, context *controllerconfig.Context) error { - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgrScheme); err != nil { - return errors.Wrap(err, "failed to add scheme to ceph") - } - if err := mapiv1.AddToScheme(mgrScheme); err != nil { - return errors.Wrap(err, "failed to add scheme to map") - } - - reconcileMachineLabel := &ReconcileMachineLabel{ - client: mgr.GetClient(), - scheme: mgrScheme, - options: context, - } - - reconciler := reconcile.Reconciler(reconcileMachineLabel) - // create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: reconciler}) - if err != nil { - return errors.Wrapf(err, "could not create controller %q", controllerName) - } - - // Watch for the machines and enqueue the machineRequests if the machine is occupied by the osd pods - err = c.Watch(&source.Kind{Type: &mapiv1.Machine{}}, handler.EnqueueRequestsFromMapFunc( - handler.MapFunc(func(obj client.Object) []reconcile.Request { - clusterNamespace, isNamespacePresent := obj.GetLabels()[MachineFencingNamespaceLabelKey] - if !isNamespacePresent || len(clusterNamespace) == 0 { - return []reconcile.Request{} - } - clusterName, isClusterNamePresent := obj.GetLabels()[MachineFencingLabelKey] - if !isClusterNamePresent || len(clusterName) == 0 { - return []reconcile.Request{} - } - req := reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}} - return []reconcile.Request{req} - }), - )) - if err != nil { - return errors.Wrap(err, "could not watch machines") - } - - // Watch for the osd pods and enqueue the CephCluster in the namespace from the pods - return c.Watch(&source.Kind{Type: &corev1.Pod{}}, handler.EnqueueRequestsFromMapFunc( - handler.MapFunc(func(obj client.Object) []reconcile.Request { - _, ok := obj.(*corev1.Pod) - if !ok { - return []reconcile.Request{} - } - labels := obj.GetLabels() - if value, present := labels[osdPodLabelKey]; !present || value != osdPODLabelValue { - return []reconcile.Request{} - } - namespace := obj.GetNamespace() - rookClusterName, present := labels[osdClusterNameLabelKey] - if !present { - return []reconcile.Request{} - } - req := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace, Name: rookClusterName}} - return []reconcile.Request{req} - }), - )) -} diff --git a/pkg/operator/ceph/disruption/machinelabel/doc.go b/pkg/operator/ceph/disruption/machinelabel/doc.go deleted file mode 100644 index d430ac5d4..000000000 --- a/pkg/operator/ceph/disruption/machinelabel/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package machinelabel implements the controller for ensuring that machines are labeled in correct manner for fencing. 
-The design and purpose for machine disruption management is found at: -https://github.com/rook/rook/blob/master/design/ceph/ceph-openshift-fencing-mitigation.md -*/ - -package machinelabel diff --git a/pkg/operator/ceph/disruption/machinelabel/reconcile.go b/pkg/operator/ceph/disruption/machinelabel/reconcile.go deleted file mode 100644 index ca4d6e581..000000000 --- a/pkg/operator/ceph/disruption/machinelabel/reconcile.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package machinelabel - -import ( - "context" - - "github.com/coreos/pkg/capnslog" - mapiv1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/cluster/osd" - "github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig" - "github.com/rook/rook/pkg/operator/k8sutil" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - controllerName = "machinelabel-controller" - MachineFencingLabelKey = "fencegroup.rook.io/cluster" - MachineFencingNamespaceLabelKey = "fencegroup.rook.io/clusterNamespace" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -type ReconcileMachineLabel struct { - scheme *runtime.Scheme - client client.Client - options *controllerconfig.Context -} - -type machine struct { - isOccupiedByOSD bool - RawMachine mapiv1.Machine -} - -// Reconcile is the implementation of reconcile function for ReconcileMachineLabel -// which ensures that the machineLabel for the osd pods are in correct state -// The Controller will requeue the Request to be processed again if an error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
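The reconcile that follows builds a node-to-machine map and marks a machine as occupied when an OSD pod is scheduled on the node backing it; those marks drive the fencegroup labels. A simplified sketch of that bookkeeping, with invented names and a plain map standing in for the machine struct used in the real code:

package example

// markOccupiedNodes records which machines carry OSDs: machines are keyed by the
// node they back, and a machine is marked occupied when an OSD pod runs on that node.
func markOccupiedNodes(machineOccupied map[string]bool, osdNodeNames []string) {
	for _, node := range osdNodeNames {
		if _, known := machineOccupied[node]; known {
			machineOccupied[node] = true
		}
	}
}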
-func (r *ReconcileMachineLabel) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // wrapping reconcile because the rook logging mechanism is not compatible with the controller-runtime logging interface - result, err := r.reconcile(request) - if err != nil { - logger.Error(err) - } - return result, err -} - -func (r *ReconcileMachineLabel) reconcile(request reconcile.Request) (reconcile.Result, error) { - logger.Debugf("reconciling %s", request.NamespacedName) - - // Fetch list of osd pods for the requested ceph cluster - pods := &corev1.PodList{} - err := r.client.List(context.TODO(), pods, client.InNamespace(request.Namespace), - client.MatchingLabels{k8sutil.AppAttr: osd.AppName, k8sutil.ClusterAttr: request.Name}) - if err != nil { - return reconcile.Result{}, err - } - - // Fetching the cephCluster - cephClusterInstance := &cephv1.CephCluster{} - err = r.client.Get(context.TODO(), request.NamespacedName, cephClusterInstance) - if kerrors.IsNotFound(err) { - logger.Infof("cephCluster instance not found for %s", request.NamespacedName) - return reconcile.Result{}, nil - } else if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to fetch cephCluster %s", request.NamespacedName) - } - - // skipping the reconcile since the feature is switched off - if !cephClusterInstance.Spec.DisruptionManagement.ManageMachineDisruptionBudgets { - logger.Debugf("Skipping reconcile for cephCluster %s as manageMachineDisruption is turned off", request.NamespacedName) - return reconcile.Result{}, nil - } - - // Fetch list of machines available - machines := &mapiv1.MachineList{} - err = r.client.List(context.TODO(), machines, client.InNamespace(cephClusterInstance.Spec.DisruptionManagement.MachineDisruptionBudgetNamespace)) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed tp fetch machine list") - } - - nodeMachineMap := map[string]machine{} - - // Adding machines to nodeMachineMap - for _, m := range machines.Items { - if m.Status.NodeRef != nil { - nodeMachineMap[m.Status.NodeRef.Name] = machine{RawMachine: m} - } - } - - // Marking machines that are occupied by the osd pods - for _, pod := range pods.Items { - if pod.Spec.NodeName != "" { - if machine, p := nodeMachineMap[pod.Spec.NodeName]; p { - machine.isOccupiedByOSD = true - nodeMachineMap[pod.Spec.NodeName] = machine - } - } - } - - // Updating the machine status - for _, machine := range nodeMachineMap { - labels := machine.RawMachine.GetLabels() - if machine.isOccupiedByOSD { - if shouldSkipMachineUpdate(labels, request.Name, request.Namespace) { - continue - } - labels[MachineFencingLabelKey] = request.Name - labels[MachineFencingNamespaceLabelKey] = request.Namespace - machine.RawMachine.SetLabels(labels) - err = r.client.Update(context.TODO(), &machine.RawMachine) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to update machine %s", machine.RawMachine.GetName()) - } - logger.Infof("Successfully updated the Machine %s", machine.RawMachine.GetName()) - } else { - if shouldSkipMachineUpdate(labels, "", "") { - continue - } - labels[MachineFencingLabelKey] = "" - labels[MachineFencingNamespaceLabelKey] = "" - machine.RawMachine.SetLabels(labels) - err = r.client.Update(context.TODO(), &machine.RawMachine) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to update machine %s", machine.RawMachine.GetName()) - } - logger.Infof("Successfully updated the Machine %s", machine.RawMachine.GetName()) - } - } - - return 
reconcile.Result{}, nil -} - -// shouldSkipMachineUpdate return true if the machine labels are already the expected value -func shouldSkipMachineUpdate(labels map[string]string, expectedName, expectedNamespace string) bool { - clusterName, isClusterNamePresent := labels[MachineFencingLabelKey] - clusterNamespace, isClusterNamespacePresent := labels[MachineFencingNamespaceLabelKey] - return isClusterNamePresent && isClusterNamespacePresent && clusterName == expectedName && clusterNamespace == expectedNamespace -} diff --git a/pkg/operator/ceph/file/controller.go b/pkg/operator/ceph/file/controller.go deleted file mode 100644 index 4d0b3fe9f..000000000 --- a/pkg/operator/ceph/file/controller.go +++ /dev/null @@ -1,459 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package file manages a CephFS filesystem and the required daemons. -package file - -import ( - "context" - "fmt" - "reflect" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/file/mirror" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - controllerName = "ceph-file-controller" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -// List of object resources to watch by the controller -var objectsToWatch = []client.Object{ - &corev1.Secret{TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: corev1.SchemeGroupVersion.String()}}, - &appsv1.Deployment{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.String()}}, -} - -var cephFilesystemKind = reflect.TypeOf(cephv1.CephFilesystem{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephFilesystemKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileCephFilesystem reconciles a CephFilesystem object -type ReconcileCephFilesystem struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - cephClusterSpec *cephv1.ClusterSpec - clusterInfo *cephclient.ClusterInfo - fsChannels map[string]*fsHealth -} - -type fsHealth struct { 
- stopChan chan struct{} - monitoringRunning bool -} - -// Add creates a new CephFilesystem Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. -func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - return &ReconcileCephFilesystem{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - fsChannels: make(map[string]*fsHealth), - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephFilesystem CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephFilesystem{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - // Watch all other resources - for _, t := range objectsToWatch { - err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephFilesystem{}, - }, opcontroller.WatchPredicateForNonCRDObject(&cephv1.CephFilesystem{TypeMeta: controllerTypeMeta}, mgr.GetScheme())) - if err != nil { - return err - } - } - - // Build Handler function to return the list of ceph filesystems - // This is used by the watchers below - handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephFilesystemList{}, mgr.GetScheme()) - if err != nil { - return err - } - - // Watch for CephCluster Spec changes that we want to propagate to us - err = c.Watch(&source.Kind{Type: &cephv1.CephCluster{ - TypeMeta: metav1.TypeMeta{ - Kind: opcontroller.ClusterResource.Kind, - APIVersion: opcontroller.ClusterResource.APIVersion, - }, - }, - }, handler.EnqueueRequestsFromMapFunc(handlerFunc), opcontroller.WatchCephClusterPredicate()) - if err != nil { - return err - } - - // Watch for ConfigMap "rook-ceph-mon-endpoints" update and reconcile, which will reconcile update the bootstrap peer token - err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: corev1.SchemeGroupVersion.String()}}}, handler.EnqueueRequestsFromMapFunc(handlerFunc), mon.PredicateMonEndpointChanges()) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephFilesystem object and makes changes based on the state read -// and what is in the cephFilesystem.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
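The add() function above wires several watches, including ones that map events on secondary objects (CephCluster, the mon endpoints ConfigMap) to reconcile requests for the filesystem CRs. A self-contained sketch of that mapping pattern, using the same handler API the deleted code uses; the fixed target name is purely illustrative:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// mapConfigMapToFilesystem turns an event on a watched ConfigMap into a reconcile
// request for a filesystem CR in the same namespace.
var mapConfigMapToFilesystem = handler.EnqueueRequestsFromMapFunc(
	handler.MapFunc(func(obj client.Object) []reconcile.Request {
		if _, ok := obj.(*corev1.ConfigMap); !ok {
			return nil
		}
		return []reconcile.Request{
			{NamespacedName: types.NamespacedName{Namespace: obj.GetNamespace(), Name: "example-cephfilesystem"}},
		}
	}),
)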
-func (r *ReconcileCephFilesystem) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - logger.Errorf("failed to reconcile %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the cephFilesystem instance - cephFilesystem := &cephv1.CephFilesystem{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephFilesystem) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("cephFilesystem resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, errors.Wrap(err, "failed to get cephFilesystem") - } - - // Set a finalizer so we can do cleanup before the object goes away - err = opcontroller.AddFinalizerIfNotPresent(r.client, cephFilesystem) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer") - } - - // The CR was just created, initializing status fields - if cephFilesystem.Status == nil { - updateStatus(r.client, request.NamespacedName, k8sutil.EmptyStatus, nil) - } - - // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - // This handles the case where the Ceph Cluster is gone and we want to delete that CR - // We skip the deleteFilesystem() function since everything is gone already - // - // Also, only remove the finalizer if the CephCluster is gone - // If not, we should wait for it to be ready - // This handles the case where the operator is not ready to accept Ceph command but the cluster exists - if !cephFilesystem.GetDeletionTimestamp().IsZero() && !cephClusterExists { - // Remove finalizer - err := opcontroller.RemoveFinalizer(r.client, cephFilesystem) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. 
- return reconcile.Result{}, nil - } - return reconcileResponse, nil - } - r.cephClusterSpec = &cephCluster.Spec - - // Initialize the channel, it allows us to track multiple CephFilesystems in the same namespace - _, fsChannelExists := r.fsChannels[fsChannelKeyName(cephFilesystem)] - if !fsChannelExists { - r.fsChannels[fsChannelKeyName(cephFilesystem)] = &fsHealth{ - stopChan: make(chan struct{}), - monitoringRunning: false, - } - } - - // Populate clusterInfo - // Always populate it during each reconcile - clusterInfo, _, _, err := mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info") - } - r.clusterInfo = clusterInfo - - // Populate CephVersion - currentCephVersion, err := cephclient.LeastUptodateDaemonVersion(r.context, r.clusterInfo, opconfig.MonType) - if err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil - } - return reconcile.Result{}, errors.Wrapf(err, "failed to retrieve current ceph %q version", opconfig.MonType) - } - r.clusterInfo.CephVersion = currentCephVersion - - // DELETE: the CR was deleted - if !cephFilesystem.GetDeletionTimestamp().IsZero() { - logger.Debugf("deleting filesystem %q", cephFilesystem.Name) - err = r.reconcileDeleteFilesystem(cephFilesystem) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to delete filesystem %q. ", cephFilesystem.Name) - } - - // If the ceph fs still in the map, we must remove it during CR deletion - if fsChannelExists { - // Close the channel to stop the mirroring status - close(r.fsChannels[fsChannelKeyName(cephFilesystem)].stopChan) - - // Remove ceph fs from the map - delete(r.fsChannels, fsChannelKeyName(cephFilesystem)) - } - - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephFilesystem) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. 
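The fsChannels bookkeeping above (one stop channel per filesystem, keyed by namespace and name as in fsChannelKeyName) is a common pattern for per-resource background monitoring. A minimal self-contained sketch of that pattern, with hypothetical names and no Ceph calls, could look like this:

package main

import "fmt"

// fsMonitor mirrors, in simplified form, the fsHealth entries tracked above:
// a stop channel plus a flag recording whether monitoring is running.
type fsMonitor struct {
	stopChan chan struct{}
	running  bool
}

// registry keys monitors by "<namespace>-<name>", matching fsChannelKeyName.
type registry map[string]*fsMonitor

func key(namespace, name string) string {
	return fmt.Sprintf("%s-%s", namespace, name)
}

// ensure adds an entry for a filesystem if it is not tracked yet.
func (r registry) ensure(namespace, name string) *fsMonitor {
	k := key(namespace, name)
	if _, ok := r[k]; !ok {
		r[k] = &fsMonitor{stopChan: make(chan struct{})}
	}
	return r[k]
}

// remove closes the stop channel, which ends any monitoring goroutine
// selecting on it, and drops the entry (the CR deletion path above).
func (r registry) remove(namespace, name string) {
	k := key(namespace, name)
	if m, ok := r[k]; ok {
		close(m.stopChan)
		delete(r, k)
	}
}

func main() {
	r := registry{}
	r.ensure("rook-ceph", "myfs")
	r.remove("rook-ceph", "myfs")
	fmt.Println(len(r)) // 0
}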
- return reconcile.Result{}, nil - } - - // validate the filesystem settings - if err := validateFilesystem(r.context, r.clusterInfo, r.cephClusterSpec, cephFilesystem); err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil - } - return reconcile.Result{}, errors.Wrapf(err, "invalid object filesystem %q arguments", cephFilesystem.Name) - } - - // RECONCILE - logger.Debug("reconciling ceph filesystem store deployments") - reconcileResponse, err = r.reconcileCreateFilesystem(cephFilesystem) - if err != nil { - updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure, nil) - return reconcileResponse, err - } - - // Enable mirroring if needed - if r.clusterInfo.CephVersion.IsAtLeast(mirror.PeerAdditionMinVersion) { - // Disable mirroring on that filesystem if needed - if cephFilesystem.Spec.Mirroring != nil { - if !cephFilesystem.Spec.Mirroring.Enabled { - err = cephclient.DisableFilesystemSnapshotMirror(r.context, r.clusterInfo, cephFilesystem.Name) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to disable mirroring on filesystem %q", cephFilesystem.Name) - } - } else { - logger.Info("reconciling cephfs-mirror mirroring configuration") - err = r.reconcileMirroring(cephFilesystem, request.NamespacedName) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to configure mirroring for filesystem %q.", cephFilesystem.Name) - } - - // Always create a bootstrap peer token in case another cluster wants to add us as a peer - logger.Info("reconciling create cephfs-mirror peer configuration") - reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, r.clusterInfo, cephFilesystem, k8sutil.NewOwnerInfo(cephFilesystem, r.scheme)) - if err != nil { - updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure, nil) - return reconcileResponse, errors.Wrapf(err, "failed to create cephfs-mirror bootstrap peer for filesystem %q.", cephFilesystem.Name) - } - - logger.Info("reconciling add cephfs-mirror peer configuration") - err = r.reconcileAddBoostrapPeer(cephFilesystem, request.NamespacedName) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to configure mirroring for filesystem %q.", cephFilesystem.Name) - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady, opcontroller.GenerateStatusInfo(cephFilesystem)) - - // Run go routine check for mirroring status - if !cephFilesystem.Spec.StatusCheck.Mirror.Disabled { - // Start monitoring cephfs-mirror status - if r.fsChannels[fsChannelKeyName(cephFilesystem)].monitoringRunning { - logger.Debug("ceph filesystem mirror status monitoring go routine already running!") - } else { - checker := newMirrorChecker(r.context, r.client, r.clusterInfo, request.NamespacedName, &cephFilesystem.Spec, cephFilesystem.Name) - r.fsChannels[fsChannelKeyName(cephFilesystem)].monitoringRunning = true - go checker.checkMirroring(r.fsChannels[fsChannelKeyName(cephFilesystem)].stopChan) - } - } - } - } - } else { - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady, nil) - } - - // Return and do not requeue - logger.Debug("done reconciling") - return reconcile.Result{}, nil -} - -func (r *ReconcileCephFilesystem) reconcileCreateFilesystem(cephFilesystem 
*cephv1.CephFilesystem) (reconcile.Result, error) { - if r.cephClusterSpec.External.Enable { - _, err := opcontroller.ValidateCephVersionsBetweenLocalAndExternalClusters(r.context, r.clusterInfo) - if err != nil { - // This handles the case where the operator is running, the external cluster has been upgraded and a CR creation is called - // If that's a major version upgrade we fail, if it's a minor version, we continue, it's not ideal but not critical - return reconcile.Result{}, errors.Wrapf(err, "refusing to run new crd") - } - } - - // preservePoolsOnDelete being set to true has data-loss concerns and is deprecated (see #6492). - // If preservePoolsOnDelete is set to true, assume the user means preserveFilesystemOnDelete instead. - if cephFilesystem.Spec.PreservePoolsOnDelete { - if !cephFilesystem.Spec.PreserveFilesystemOnDelete { - logger.Warning("preservePoolsOnDelete (currently set 'true') has been deprecated in favor of preserveFilesystemOnDelete (currently set 'false') due to data loss concerns so Rook will assume preserveFilesystemOnDelete 'true'") - cephFilesystem.Spec.PreserveFilesystemOnDelete = true - } - } - - ownerInfo := k8sutil.NewOwnerInfo(cephFilesystem, r.scheme) - err := createFilesystem(r.context, r.clusterInfo, *cephFilesystem, r.cephClusterSpec, ownerInfo, r.cephClusterSpec.DataDirHostPath) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to create filesystem %q", cephFilesystem.Name) - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileCephFilesystem) reconcileDeleteFilesystem(cephFilesystem *cephv1.CephFilesystem) error { - ownerInfo := k8sutil.NewOwnerInfo(cephFilesystem, r.scheme) - err := deleteFilesystem(r.context, r.clusterInfo, *cephFilesystem, r.cephClusterSpec, ownerInfo, r.cephClusterSpec.DataDirHostPath) - if err != nil { - return err - } - - return nil -} - -func (r *ReconcileCephFilesystem) reconcileMirroring(cephFilesystem *cephv1.CephFilesystem, namespacedName types.NamespacedName) error { - // Enable the mgr module - err := cephclient.MgrEnableModule(r.context, r.clusterInfo, "mirroring", false) - if err != nil { - return errors.Wrap(err, "failed to enable mirroring mgr module") - } - - // Enable snapshot mirroring on that filesystem - err = cephclient.EnableFilesystemSnapshotMirror(r.context, r.clusterInfo, cephFilesystem.Name) - if err != nil { - return errors.Wrapf(err, "failed to enable mirroring on filesystem %q", cephFilesystem.Name) - } - - // Add snapshot schedules - if cephFilesystem.Spec.Mirroring.SnapShotScheduleEnabled() { - // Enable the snap_schedule module - err = cephclient.MgrEnableModule(r.context, r.clusterInfo, "snap_schedule", false) - if err != nil { - return errors.Wrap(err, "failed to enable snap_schedule mgr module") - } - - // Enable snapshot schedules - for _, snap := range cephFilesystem.Spec.Mirroring.SnapshotSchedules { - err = cephclient.AddSnapshotSchedule(r.context, r.clusterInfo, snap.Path, snap.Interval, snap.StartTime, cephFilesystem.Name) - if err != nil { - return errors.Wrapf(err, "failed to add snapshot schedules on filesystem %q", cephFilesystem.Name) - } - } - // Enable snapshot retention - for _, retention := range cephFilesystem.Spec.Mirroring.SnapshotRetention { - err = cephclient.AddSnapshotScheduleRetention(r.context, r.clusterInfo, retention.Path, retention.Duration, cephFilesystem.Name) - if err != nil { - return errors.Wrapf(err, "failed to add snapshot retention on filesystem %q", cephFilesystem.Name) - } - } - } - - return nil -} - -func (r 
*ReconcileCephFilesystem) reconcileAddBoostrapPeer(cephFilesystem *cephv1.CephFilesystem, namespacedName types.NamespacedName) error { - if cephFilesystem.Spec.Mirroring.Peers == nil { - return nil - } - ctx := context.TODO() - // List all the peers secret, we can have more than one peer we might want to configure - // For each, get the Kubernetes Secret and import the "peer token" so that we can configure the mirroring - for _, peerSecret := range cephFilesystem.Spec.Mirroring.Peers.SecretNames { - logger.Debugf("fetching bootstrap peer kubernetes secret %q", peerSecret) - s, err := r.context.Clientset.CoreV1().Secrets(r.clusterInfo.Namespace).Get(ctx, peerSecret, metav1.GetOptions{}) - // We don't care about IsNotFound here, we still need to fail - if err != nil { - return errors.Wrapf(err, "failed to fetch kubernetes secret %q fs-mirror bootstrap peer", peerSecret) - } - - // Validate peer secret content - err = opcontroller.ValidatePeerToken(cephFilesystem, s.Data) - if err != nil { - return errors.Wrapf(err, "failed to validate fs-mirror bootstrap peer secret %q data", peerSecret) - } - - // Add fs-mirror peer - err = cephclient.ImportFSMirrorBootstrapPeer(r.context, r.clusterInfo, cephFilesystem.Name, string(s.Data["token"])) - if err != nil { - return errors.Wrap(err, "failed to import filesystem bootstrap peer token") - } - } - - return nil -} - -func fsChannelKeyName(cephFilesystem *cephv1.CephFilesystem) string { - return fmt.Sprintf("%s-%s", cephFilesystem.Namespace, cephFilesystem.Name) -} diff --git a/pkg/operator/ceph/file/controller_test.go b/pkg/operator/ceph/file/controller_test.go deleted file mode 100644 index 1d1570a04..000000000 --- a/pkg/operator/ceph/file/controller_test.go +++ /dev/null @@ -1,310 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package file to manage a rook filesystem -package file - -import ( - "context" - "os" - "testing" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - fsGet = `{ - "mdsmap":{ - "epoch":49, - "flags":50, - "ever_allowed_features":32, - "explicitly_allowed_features":32, - "created":"2020-03-17 13:17:43.743717", - "modified":"2020-03-17 15:22:51.020576", - "tableserver":0, - "root":0, - "session_timeout":60, - "session_autoclose":300, - "min_compat_client":"-1 (unspecified)", - "max_file_size":1099511627776, - "last_failure":0, - "last_failure_osd_epoch":0, - "compat":{ - "compat":{ - - }, - "ro_compat":{ - - }, - "incompat":{ - "feature_1":"base v0.20", - "feature_2":"client writeable ranges", - "feature_3":"default file layouts on dirs", - "feature_4":"dir inode in separate object", - "feature_5":"mds uses versioned encoding", - "feature_6":"dirfrag is stored in omap", - "feature_8":"no anchor table", - "feature_9":"file layout v2", - "feature_10":"snaprealm v2" - } - }, - "max_mds":1, - "in":[ - 0 - ], - "up":{ - "mds_0":4463 - }, - "failed":[ - - ], - "damaged":[ - - ], - "stopped":[ - - ], - "info":{ - "gid_4463":{ - "gid":4463, - "name":"myfs-a", - "rank":0, - "incarnation":5, - "state":"up:active", - "state_seq":3, - "addr":"172.17.0.12:6801/175789278", - "addrs":{ - "addrvec":[ - { - "type":"v2", - "addr":"172.17.0.12:6800", - "nonce":175789278 - }, - { - "type":"v1", - "addr":"172.17.0.12:6801", - "nonce":175789278 - } - ] - }, - "export_targets":[ - - ], - "features":4611087854031667199, - "flags":0 - } - }, - "data_pools":[ - 3 - ], - "metadata_pool":2, - "enabled":true, - "fs_name":"myfs", - "balancer":"", - "standby_count_wanted":1 - }, - "id":1 - }` - mdsCephAuthGetOrCreateKey = `{"key":"AQCvzWBeIV9lFRAAninzm+8XFxbSfTiPwoX50g=="}` - dummyVersionsRaw = ` - { - "mon": { - "ceph version 14.2.8 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 3 - } - }` -) - -var ( - name = "my-fs" - namespace = "rook-ceph" -) - -func TestCephFilesystemController(t *testing.T) { - ctx := context.TODO() - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - // A Pool resource with metadata and spec. - fs := &cephv1.CephFilesystem{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.FilesystemSpec{ - MetadataServer: cephv1.MetadataServerSpec{ - ActiveCount: 1, - }, - }, - TypeMeta: controllerTypeMeta, - } - - // Objects to track in the fake client. 
- object := []runtime.Object{ - fs, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - return "", nil - }, - } - - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectStore{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephFilesystem object with the scheme and fake client. - r := &ReconcileCephFilesystem{client: cl, scheme: s, context: c, fsChannels: make(map[string]*fsHealth)} - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - logger.Info("STARTING PHASE 1") - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 1 DONE") - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephFilesystem object with the scheme and fake client. - r = &ReconcileCephFilesystem{client: cl, scheme: s, context: c} - logger.Info("STARTING PHASE 2") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 2 DONE") - - // - // TEST 3: - // - // SUCCESS! The CephCluster is ready - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. 
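The MockExecutor used throughout these tests follows a simple pattern: an executor interface whose mock implementation delegates to a caller-supplied function, so each test can return canned ceph CLI output per command. A rough standalone sketch of that pattern, with invented names rather than Rook's actual exec package:

package main

import (
	"fmt"
	"strings"
)

// commandExecutor abstracts running an external CLI (standing in for the
// ceph binary) so tests can substitute canned output.
type commandExecutor interface {
	ExecuteCommandWithOutput(command string, args ...string) (string, error)
}

// mockExecutor returns whatever its callback decides, keyed off the arguments.
type mockExecutor struct {
	MockExecuteCommandWithOutput func(command string, args ...string) (string, error)
}

var _ commandExecutor = &mockExecutor{}

func (m *mockExecutor) ExecuteCommandWithOutput(command string, args ...string) (string, error) {
	return m.MockExecuteCommandWithOutput(command, args...)
}

func main() {
	exec := &mockExecutor{
		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
			if len(args) > 0 && args[0] == "versions" {
				return `{"mon":{"ceph version 14.2.8 nautilus (stable)": 3}}`, nil
			}
			return "", fmt.Errorf("unexpected command: %s %s", command, strings.Join(args, " "))
		},
	}
	out, err := exec.ExecuteCommandWithOutput("ceph", "versions")
	fmt.Println(out, err)
}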
- cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "fs" && args[1] == "get" { - return fsGet, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return mdsCephAuthGetOrCreateKey, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - return "", nil - }, - } - c.Executor = executor - - // Create a ReconcileCephFilesystem object with the scheme and fake client. - r = &ReconcileCephFilesystem{client: cl, scheme: s, context: c, fsChannels: make(map[string]*fsHealth)} - - logger.Info("STARTING PHASE 3") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, fs) - assert.NoError(t, err) - assert.Equal(t, cephv1.ConditionType("Ready"), fs.Status.Phase, fs) - logger.Info("PHASE 3 DONE") -} diff --git a/pkg/operator/ceph/file/filesystem.go b/pkg/operator/ceph/file/filesystem.go deleted file mode 100644 index c6cba903b..000000000 --- a/pkg/operator/ceph/file/filesystem.go +++ /dev/null @@ -1,309 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package file - -import ( - "fmt" - "syscall" - - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/file/mds" - "github.com/rook/rook/pkg/operator/ceph/pool" -) - -const ( - dataPoolSuffix = "data" - metaDataPoolSuffix = "metadata" -) - -// Filesystem represents an instance of a Ceph filesystem (CephFS) -type Filesystem struct { - Name string - Namespace string -} - -// createFilesystem creates a Ceph filesystem with metadata servers -func createFilesystem( - context *clusterd.Context, - clusterInfo *cephclient.ClusterInfo, - fs cephv1.CephFilesystem, - clusterSpec *cephv1.ClusterSpec, - ownerInfo *k8sutil.OwnerInfo, - dataDirHostPath string, -) error { - if len(fs.Spec.DataPools) != 0 { - f := newFS(fs.Name, fs.Namespace) - if err := f.doFilesystemCreate(context, clusterInfo, clusterSpec, fs.Spec); err != nil { - return errors.Wrapf(err, "failed to create filesystem %q", fs.Name) - } - } - - filesystem, err := cephclient.GetFilesystem(context, clusterInfo, fs.Name) - if err != nil { - return errors.Wrapf(err, "failed to get filesystem %q", fs.Name) - } - - if fs.Spec.MetadataServer.ActiveStandby { - if err = cephclient.AllowStandbyReplay(context, clusterInfo, fs.Name, fs.Spec.MetadataServer.ActiveStandby); err != nil { - return errors.Wrapf(err, "failed to set allow_standby_replay to filesystem %q", fs.Name) - } - } - - // set the number of active mds instances - if fs.Spec.MetadataServer.ActiveCount > 1 { - if err = cephclient.SetNumMDSRanks(context, clusterInfo, fs.Name, fs.Spec.MetadataServer.ActiveCount); err != nil { - logger.Warningf("failed setting active mds count to %d. 
%v", fs.Spec.MetadataServer.ActiveCount, err) - } - } - - logger.Infof("start running mdses for filesystem %q", fs.Name) - c := mds.NewCluster(clusterInfo, context, clusterSpec, fs, filesystem, ownerInfo, dataDirHostPath) - if err := c.Start(); err != nil { - return err - } - - return nil -} - -// deleteFilesystem deletes the filesystem from Ceph -func deleteFilesystem( - context *clusterd.Context, - clusterInfo *cephclient.ClusterInfo, - fs cephv1.CephFilesystem, - clusterSpec *cephv1.ClusterSpec, - ownerInfo *k8sutil.OwnerInfo, - dataDirHostPath string, -) error { - filesystem, err := cephclient.GetFilesystem(context, clusterInfo, fs.Name) - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - // If we're deleting the filesystem anyway, ignore the error that the filesystem doesn't exist - return nil - } - return errors.Wrapf(err, "failed to get filesystem %q", fs.Name) - } - c := mds.NewCluster(clusterInfo, context, clusterSpec, fs, filesystem, ownerInfo, dataDirHostPath) - - // Delete mds CephX keys and configuration in centralized mon database - replicas := fs.Spec.MetadataServer.ActiveCount * 2 - for i := 0; i < int(replicas); i++ { - daemonLetterID := k8sutil.IndexToName(i) - daemonName := fmt.Sprintf("%s-%s", fs.Name, daemonLetterID) - - err = c.DeleteMdsCephObjects(daemonName) - if err != nil { - return errors.Wrapf(err, "failed to delete mds ceph objects for filesystem %q", fs.Name) - } - } - - // The most important part of deletion is that the filesystem gets removed from Ceph - // The K8s resources will already be removed with the K8s owner references - if err := downFilesystem(context, clusterInfo, fs.Name); err != nil { - // If the fs isn't deleted from Ceph, leave the daemons so it can still be used. - return errors.Wrapf(err, "failed to down filesystem %q", fs.Name) - } - - // Permanently remove the filesystem if it was created by rook and the spec does not prevent it. 
- if len(fs.Spec.DataPools) != 0 && !fs.Spec.PreserveFilesystemOnDelete { - if err := cephclient.RemoveFilesystem(context, clusterInfo, fs.Name, fs.Spec.PreservePoolsOnDelete); err != nil { - return errors.Wrapf(err, "failed to remove filesystem %q", fs.Name) - } - } - return nil -} - -func validateFilesystem(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec, f *cephv1.CephFilesystem) error { - if f.Name == "" { - return errors.New("missing name") - } - if f.Namespace == "" { - return errors.New("missing namespace") - } - if f.Spec.MetadataServer.ActiveCount < 1 { - return errors.New("MetadataServer.ActiveCount must be at least 1") - } - // No data pool means that we expect the fs to exist already - if len(f.Spec.DataPools) == 0 { - return nil - } - if err := pool.ValidatePoolSpec(context, clusterInfo, clusterSpec, &f.Spec.MetadataPool); err != nil { - return errors.Wrap(err, "invalid metadata pool") - } - for _, p := range f.Spec.DataPools { - localpoolSpec := p - if err := pool.ValidatePoolSpec(context, clusterInfo, clusterSpec, &localpoolSpec); err != nil { - return errors.Wrap(err, "Invalid data pool") - } - } - - return nil -} - -// newFS creates a new instance of the file (MDS) service -func newFS(name, namespace string) *Filesystem { - return &Filesystem{ - Name: name, - Namespace: namespace, - } -} - -// SetPoolSize function sets the sizes for MetadataPool and dataPool -func SetPoolSize(f *Filesystem, context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec, spec cephv1.FilesystemSpec) error { - // generating the metadata pool's name - metadataPoolName := generateMetaDataPoolName(f) - err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, metadataPoolName, spec.MetadataPool, "") - if err != nil { - return errors.Wrapf(err, "failed to update metadata pool %q", metadataPoolName) - } - // generating the data pool's name - dataPoolNames := generateDataPoolNames(f, spec) - for i, pool := range spec.DataPools { - poolName := dataPoolNames[i] - err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool, "") - if err != nil { - return errors.Wrapf(err, "failed to update datapool %q", poolName) - } - } - return nil -} - -// updateFilesystem ensures that a filesystem which already exists matches the provided spec. -func (f *Filesystem) updateFilesystem(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec, spec cephv1.FilesystemSpec) error { - // Even if the fs already exists, the num active mdses may have changed - if err := cephclient.SetNumMDSRanks(context, clusterInfo, f.Name, spec.MetadataServer.ActiveCount); err != nil { - logger.Errorf( - fmt.Sprintf("failed to set num mds ranks (max_mds) to %d for filesystem %s, still continuing. ", spec.MetadataServer.ActiveCount, f.Name) + - "this error is not critical, but mdses may not be as failure tolerant as desired. " + - fmt.Sprintf("USER should verify that the number of active mdses is %d with 'ceph fs get %s'", spec.MetadataServer.ActiveCount, f.Name) + - fmt.Sprintf(". 
%v", err), - ) - } - - if err := SetPoolSize(f, context, clusterInfo, clusterSpec, spec); err != nil { - return errors.Wrap(err, "failed to set pools size") - } - - dataPoolNames := generateDataPoolNames(f, spec) - for i := range spec.DataPools { - if err := cephclient.AddDataPoolToFilesystem(context, clusterInfo, f.Name, dataPoolNames[i]); err != nil { - return err - } - } - return nil -} - -// doFilesystemCreate starts the Ceph file daemons and creates the filesystem in Ceph. -func (f *Filesystem) doFilesystemCreate(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec, spec cephv1.FilesystemSpec) error { - - _, err := cephclient.GetFilesystem(context, clusterInfo, f.Name) - if err == nil { - logger.Infof("filesystem %q already exists", f.Name) - return f.updateFilesystem(context, clusterInfo, clusterSpec, spec) - } - if len(spec.DataPools) == 0 { - return errors.New("at least one data pool must be specified") - } - - fslist, err := cephclient.ListFilesystems(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to list existing filesystem(s)") - } - // This check prevents from concurrent CephFilesystem CRD trying to create a filesystem - // Whoever gets to create the Filesystem first wins the race, then we fail if that cluster is not Ceph Pacific and one Filesystem is present - if len(fslist) > 0 && !clusterInfo.CephVersion.IsAtLeastPacific() { - return errors.New("multiple filesystems are only supported as of ceph pacific") - } - - poolNames, err := cephclient.GetPoolNamesByID(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get pool names") - } - - logger.Infof("creating filesystem %q", f.Name) - - // Make easy to locate a pool by name and avoid repeated searches - reversedPoolMap := make(map[string]int) - for key, value := range poolNames { - reversedPoolMap[value] = key - } - - metadataPoolName := generateMetaDataPoolName(f) - if _, poolFound := reversedPoolMap[metadataPoolName]; !poolFound { - err = cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, metadataPoolName, spec.MetadataPool, "") - if err != nil { - return errors.Wrapf(err, "failed to create metadata pool %q", metadataPoolName) - } - } - - dataPoolNames := generateDataPoolNames(f, spec) - for i, pool := range spec.DataPools { - poolName := dataPoolNames[i] - if _, poolFound := reversedPoolMap[poolName]; !poolFound { - err = cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool, "") - if err != nil { - return errors.Wrapf(err, "failed to create data pool %q", poolName) - } - if pool.IsErasureCoded() { - // An erasure coded data pool used for a filesystem must allow overwrites - if err := cephclient.SetPoolProperty(context, clusterInfo, poolName, "allow_ec_overwrites", "true"); err != nil { - logger.Warningf("failed to set ec pool property. %v", err) - } - } - } - } - - // create the filesystem ('fs new' needs to be forced in order to reuse pre-existing pools) - // if only one pool is created new it won't work (to avoid inconsistencies). 
- if err := cephclient.CreateFilesystem(context, clusterInfo, f.Name, metadataPoolName, dataPoolNames); err != nil { - return err - } - - logger.Infof("created filesystem %q on %d data pool(s) and metadata pool %q", f.Name, len(dataPoolNames), metadataPoolName) - return nil -} - -// downFilesystem marks the filesystem as down and the MDS' as failed -func downFilesystem(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, filesystemName string) error { - logger.Infof("downing filesystem %q", filesystemName) - - if err := cephclient.FailFilesystem(context, clusterInfo, filesystemName); err != nil { - return err - } - logger.Infof("downed filesystem %q", filesystemName) - return nil -} - -// generateDataPoolName generates DataPool name by prefixing the filesystem name to the constant DataPoolSuffix -func generateDataPoolNames(f *Filesystem, spec cephv1.FilesystemSpec) []string { - var dataPoolNames []string - for i := range spec.DataPools { - poolName := fmt.Sprintf("%s-%s%d", f.Name, dataPoolSuffix, i) - dataPoolNames = append(dataPoolNames, poolName) - } - return dataPoolNames -} - -// generateMetaDataPoolName generates MetaDataPool name by prefixing the filesystem name to the constant metaDataPoolSuffix -func generateMetaDataPoolName(f *Filesystem) string { - return fmt.Sprintf("%s-%s", f.Name, metaDataPoolSuffix) -} diff --git a/pkg/operator/ceph/file/filesystem_test.go b/pkg/operator/ceph/file/filesystem_test.go deleted file mode 100644 index e53004595..000000000 --- a/pkg/operator/ceph/file/filesystem_test.go +++ /dev/null @@ -1,562 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package file - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "reflect" - "strings" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - "github.com/rook/rook/pkg/operator/ceph/file/mds" - "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - testopk8s "github.com/rook/rook/pkg/operator/k8sutil/test" - testop "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidateSpec(t *testing.T) { - context := &clusterd.Context{Executor: &exectest.MockExecutor{}} - clusterInfo := &cephclient.ClusterInfo{Namespace: "ns"} - fs := &cephv1.CephFilesystem{} - clusterSpec := &cephv1.ClusterSpec{} - - // missing name - assert.NotNil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) - fs.Name = "myfs" - - // missing namespace - assert.NotNil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) - fs.Namespace = "myns" - - // missing data pools - assert.NotNil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) - p := cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}} - fs.Spec.DataPools = append(fs.Spec.DataPools, p) - - // missing metadata pool - assert.NotNil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) - fs.Spec.MetadataPool = p - - // missing mds count - assert.NotNil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) - fs.Spec.MetadataServer.ActiveCount = 1 - - // valid! 
- assert.Nil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) -} - -func isBasePoolOperation(fsName, command string, args []string) bool { - if reflect.DeepEqual(args[0:7], []string{"osd", "pool", "create", fsName + "-metadata", "0", "replicated", fsName + "-metadata"}) { - return true - } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-metadata"}) { - return true - } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-metadata", "size", "1"}) { - return true - } else if reflect.DeepEqual(args[0:7], []string{"osd", "pool", "create", fsName + "-data0", "0", "replicated", fsName + "-data0"}) { - return true - } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-data0"}) { - return true - } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-data0", "size", "1"}) { - return true - } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-data0"}) { - return true - } - return false -} - -func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool) *exectest.MockExecutor { - mdsmap := cephclient.CephFilesystemDetails{ - ID: 0, - MDSMap: cephclient.MDSMap{ - FilesystemName: fsName, - MetadataPool: 2, - MaxMDS: 1, - Up: map[string]int{ - "mds_0": 123, - }, - DataPools: []int{3}, - Info: map[string]cephclient.MDSInfo{ - "gid_123": { - GID: 123, - State: "up:active", - Name: fmt.Sprintf("%s-%s", fsName, "a"), - }, - "gid_124": { - GID: 124, - State: "up:standby-replay", - Name: fmt.Sprintf("%s-%s", fsName, "b"), - }, - }, - }, - } - createdFsResponse, _ := json.Marshal(mdsmap) - firstGet := true - - if multiFS { - return &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if contains(args, "fs") && contains(args, "get") { - if firstGet { - firstGet = false - return "", errors.New("fs doesn't exist") - } - return string(createdFsResponse), nil - } else if contains(args, "fs") && contains(args, "ls") { - return `[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":4,"data_pool_ids":[5],"data_pools":["myfs-data0"]},{"name":"myfs2","metadata_pool":"myfs2-metadata","metadata_pool_id":6,"data_pool_ids":[7],"data_pools":["myfs2-data0"]},{"name":"leseb","metadata_pool":"cephfs.leseb.meta","metadata_pool_id":8,"data_pool_ids":[9],"data_pools":["cephfs.leseb.data"]}]`, nil - } else if contains(args, "fs") && contains(args, "dump") { - return `{"standbys":[], "filesystems":[]}`, nil - } else if contains(args, "osd") && contains(args, "lspools") { - return "[]", nil - } else if contains(args, "mds") && contains(args, "fail") { - return "", nil - } else if isBasePoolOperation(fsName, command, args) { - return "", nil - } else if reflect.DeepEqual(args[0:5], []string{"fs", "new", fsName, fsName + "-metadata", fsName + "-data0"}) { - return "", nil - } else if contains(args, "auth") && contains(args, "get-or-create-key") { - return "{\"key\":\"mysecurekey\"}", nil - } else if contains(args, "auth") && contains(args, "del") { - return "", nil - } else if contains(args, "config") && contains(args, "get") { - return "{}", nil - } else if contains(args, "config") && contains(args, "mds_cache_memory_limit") { - return "", nil - } else if contains(args, "set") && contains(args, "max_mds") { - return "", nil - } else if contains(args, "set") && contains(args, "allow_standby_replay") { - return "", nil - } else if contains(args, "config") 
&& contains(args, "mds_join_fs") { - return "", nil - } else if contains(args, "flag") && contains(args, "enable_multiple") { - return "", nil - } else if contains(args, "versions") { - versionStr, _ := json.Marshal( - map[string]map[string]int{ - "mds": { - "ceph version 16.0.0-4-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) pacific (stable)": 2, - }, - }) - return string(versionStr), nil - } else if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, "ns")) - assert.Nil(t, err) - } - - assert.Fail(t, fmt.Sprintf("Unexpected command %q %q", command, args)) - return "", nil - }, - } - } - - return &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if contains(args, "fs") && contains(args, "get") { - if firstGet { - firstGet = false - return "", errors.New("fs doesn't exist") - } - return string(createdFsResponse), nil - } else if contains(args, "fs") && contains(args, "ls") { - return "[]", nil - } else if contains(args, "fs") && contains(args, "dump") { - return `{"standbys":[], "filesystems":[]}`, nil - } else if contains(args, "osd") && contains(args, "lspools") { - return "[]", nil - } else if contains(args, "mds") && contains(args, "fail") { - return "", nil - } else if isBasePoolOperation(fsName, command, args) { - return "", nil - } else if reflect.DeepEqual(args[0:5], []string{"fs", "new", fsName, fsName + "-metadata", fsName + "-data0"}) { - return "", nil - } else if contains(args, "auth") && contains(args, "get-or-create-key") { - return "{\"key\":\"mysecurekey\"}", nil - } else if contains(args, "auth") && contains(args, "del") { - return "", nil - } else if contains(args, "config") && contains(args, "mds_cache_memory_limit") { - return "", nil - } else if contains(args, "set") && contains(args, "max_mds") { - return "", nil - } else if contains(args, "set") && contains(args, "allow_standby_replay") { - return "", nil - } else if contains(args, "config") && contains(args, "mds_join_fs") { - return "", nil - } else if contains(args, "config") && contains(args, "get") { - return "{}", nil - } else if contains(args, "versions") { - versionStr, _ := json.Marshal( - map[string]map[string]int{ - "mds": { - "ceph version 16.0.0-4-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) pacific (stable)": 2, - }, - }) - return string(versionStr), nil - } else if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, "ns")) - assert.Nil(t, err) - } - assert.Fail(t, fmt.Sprintf("Unexpected command %q %q", command, args)) - return "", nil - }, - } -} - -func fsTest(fsName string) cephv1.CephFilesystem { - return cephv1.CephFilesystem{ - ObjectMeta: metav1.ObjectMeta{Name: fsName, Namespace: "ns"}, - Spec: cephv1.FilesystemSpec{ - MetadataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}, - DataPools: []cephv1.PoolSpec{{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}}, - MetadataServer: cephv1.MetadataServerSpec{ - ActiveCount: 1, - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(4294967296, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(1337.0, resource.BinarySI), - }, - }, - }, - }, - } -} - -func TestCreateFilesystem(t *testing.T) { - ctx := context.TODO() - var deploymentsUpdated *[]*apps.Deployment - mds.UpdateDeploymentAndWait, deploymentsUpdated = 
testopk8s.UpdateDeploymentAndWaitStub() - configDir, _ := ioutil.TempDir("", "") - - fsName := "myfs" - executor := fsExecutor(t, fsName, configDir, false) - defer os.RemoveAll(configDir) - clientset := testop.New(t, 1) - context := &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset} - fs := fsTest(fsName) - clusterInfo := &cephclient.ClusterInfo{FSID: "myfsid", CephVersion: version.Octopus} - - // start a basic cluster - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Nil(t, err) - validateStart(ctx, t, context, fs) - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // starting again should be a no-op - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Nil(t, err) - validateStart(ctx, t, context, fs) - assert.ElementsMatch(t, []string{fmt.Sprintf("rook-ceph-mds-%s-a", fsName), fmt.Sprintf("rook-ceph-mds-%s-b", fsName)}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // Increasing the number of data pools should be successful. - createDataOnePoolCount := 0 - addDataOnePoolCount := 0 - createdFsResponse := fmt.Sprintf(`{"fs_name": "%s", "metadata_pool": 2, "data_pools":[3]}`, fsName) - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if contains(args, "fs") && contains(args, "get") { - return createdFsResponse, nil - } else if isBasePoolOperation(fsName, command, args) { - return "", nil - } else if reflect.DeepEqual(args[0:4], []string{"osd", "pool", "create", fsName + "-data1"}) { - createDataOnePoolCount++ - return "", nil - } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-data1"}) { - addDataOnePoolCount++ - return "", nil - } else if contains(args, "set") && contains(args, "max_mds") { - return "", nil - } else if contains(args, "auth") && contains(args, "get-or-create-key") { - return "{\"key\":\"mysecurekey\"}", nil - } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-data1"}) { - return "", nil - } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-data1", "size", "1"}) { - return "", nil - } else if args[0] == "config" && args[1] == "set" { - return "", nil - } else if contains(args, "versions") { - versionStr, _ := json.Marshal( - map[string]map[string]int{ - "mds": { - "ceph version 16.0.0-4-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) pacific (stable)": 2, - }, - }) - return string(versionStr), nil - } - assert.Fail(t, fmt.Sprintf("Unexpected command: %v", args)) - return "", nil - }, - } - context = &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset} - fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}) - - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Nil(t, err) - validateStart(ctx, t, context, fs) - assert.ElementsMatch(t, []string{fmt.Sprintf("rook-ceph-mds-%s-a", fsName), fmt.Sprintf("rook-ceph-mds-%s-b", fsName)}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - assert.Equal(t, 1, 
createDataOnePoolCount) - assert.Equal(t, 1, addDataOnePoolCount) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // Test multiple filesystem creation - // Output to check multiple filesystem creation - fses := `[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":4,"data_pool_ids":[5],"data_pools":["myfs-data0"]},{"name":"myfs2","metadata_pool":"myfs2-metadata","metadata_pool_id":6,"data_pool_ids":[7],"data_pools":["myfs2-data0"]},{"name":"leseb","metadata_pool":"cephfs.leseb.meta","metadata_pool_id":8,"data_pool_ids":[9],"data_pools":["cephfs.leseb.data"]}]` - executorMultiFS := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if contains(args, "ls") { - return fses, nil - } else if contains(args, "versions") { - versionStr, _ := json.Marshal( - map[string]map[string]int{ - "mds": { - "ceph version 16.0.0-4-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) pacific (stable)": 2, - }, - }) - return string(versionStr), nil - } - return "{\"key\":\"mysecurekey\"}", errors.New("multiple fs") - }, - } - context = &clusterd.Context{ - Executor: executorMultiFS, - ConfigDir: configDir, - Clientset: clientset, - } - - // Create another filesystem which should fail - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, &k8sutil.OwnerInfo{}, "/var/lib/rook/") - assert.Error(t, err) - assert.Equal(t, fmt.Sprintf("failed to create filesystem %q: multiple filesystems are only supported as of ceph pacific", fsName), err.Error()) - - // It works since the Ceph version is Pacific - fsName = "myfs3" - fs = fsTest(fsName) - executor = fsExecutor(t, fsName, configDir, true) - clusterInfo.CephVersion = version.Pacific - context = &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset, - } - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.NoError(t, err) -} - -func TestUpgradeFilesystem(t *testing.T) { - ctx := context.TODO() - var deploymentsUpdated *[]*apps.Deployment - mds.UpdateDeploymentAndWait, deploymentsUpdated = testopk8s.UpdateDeploymentAndWaitStub() - configDir, _ := ioutil.TempDir("", "") - - fsName := "myfs" - executor := fsExecutor(t, fsName, configDir, false) - defer os.RemoveAll(configDir) - clientset := testop.New(t, 1) - context := &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset} - fs := fsTest(fsName) - clusterInfo := &cephclient.ClusterInfo{FSID: "myfsid", CephVersion: version.Octopus} - - // start a basic cluster for upgrade - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.NoError(t, err) - validateStart(ctx, t, context, fs) - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // do upgrade - clusterInfo.CephVersion = version.Quincy - context = &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset, - } - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.NoError(t, err) - - // test fail standby daemon failed - mdsmap := cephclient.CephFilesystemDetails{ - ID: 0, - MDSMap: cephclient.MDSMap{ - FilesystemName: fsName, - MetadataPool: 2, - MaxMDS: 1, - Up: map[string]int{ - "mds_0": 123, - }, - DataPools: []int{3}, - Info: 
map[string]cephclient.MDSInfo{ - "gid_123": { - GID: 123, - State: "up:active", - Name: fmt.Sprintf("%s-%s", fsName, "a"), - }, - "gid_124": { - GID: 124, - State: "up:standby-replay", - Name: fmt.Sprintf("%s-%s", fsName, "b"), - }, - }, - }, - } - createdFsResponse, _ := json.Marshal(mdsmap) - firstGet := false - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - if contains(args, "fs") && contains(args, "get") { - if firstGet { - firstGet = false - return "", errors.New("fs doesn't exist") - } - return string(createdFsResponse), nil - } else if contains(args, "fs") && contains(args, "ls") { - return "[]", nil - } else if contains(args, "fs") && contains(args, "dump") { - return `{"standbys":[], "filesystems":[]}`, nil - } else if contains(args, "osd") && contains(args, "lspools") { - return "[]", nil - } else if contains(args, "mds") && contains(args, "fail") { - return "", errors.New("fail mds failed") - } else if isBasePoolOperation(fsName, command, args) { - return "", nil - } else if reflect.DeepEqual(args[0:5], []string{"fs", "new", fsName, fsName + "-metadata", fsName + "-data0"}) { - return "", nil - } else if contains(args, "auth") && contains(args, "get-or-create-key") { - return "{\"key\":\"mysecurekey\"}", nil - } else if contains(args, "auth") && contains(args, "del") { - return "", nil - } else if contains(args, "config") && contains(args, "mds_cache_memory_limit") { - return "", nil - } else if contains(args, "set") && contains(args, "max_mds") { - return "", nil - } else if contains(args, "set") && contains(args, "allow_standby_replay") { - return "", nil - } else if contains(args, "config") && contains(args, "mds_join_fs") { - return "", nil - } else if contains(args, "config") && contains(args, "get") { - return "{}", nil - } else if contains(args, "versions") { - versionStr, _ := json.Marshal( - map[string]map[string]int{ - "mds": { - "ceph version 16.0.0-4-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) pacific (stable)": 2, - }, - }) - return string(versionStr), nil - } - assert.Fail(t, fmt.Sprintf("Unexpected command %q %q", command, args)) - return "", nil - } - // do upgrade - clusterInfo.CephVersion = version.Quincy - context = &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset, - } - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Error(t, err) - assert.Contains(t, err.Error(), "fail mds failed") -} - -func TestCreateNopoolFilesystem(t *testing.T) { - ctx := context.TODO() - clientset := testop.New(t, 3) - configDir, _ := ioutil.TempDir("", "") - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, "ns")) - assert.Nil(t, err) - } else { - return "{\"key\":\"mysecurekey\"}", nil - } - return "", errors.New("unknown command error") - }, - } - defer os.RemoveAll(configDir) - context := &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset} - fs := cephv1.CephFilesystem{ - ObjectMeta: metav1.ObjectMeta{Name: "myfs", Namespace: "ns"}, - Spec: cephv1.FilesystemSpec{ - MetadataServer: cephv1.MetadataServerSpec{ - ActiveCount: 1, - }, - }, - } - clusterInfo := &cephclient.ClusterInfo{FSID: "myfsid"} - - // start a basic cluster - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - err := 
createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Nil(t, err) - validateStart(ctx, t, context, fs) - - // starting again should be a no-op - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Nil(t, err) - validateStart(ctx, t, context, fs) -} - -func contains(arr []string, str string) bool { - for _, a := range arr { - if a == str { - return true - } - } - return false -} - -func validateStart(ctx context.Context, t *testing.T, context *clusterd.Context, fs cephv1.CephFilesystem) { - r, err := context.Clientset.AppsV1().Deployments(fs.Namespace).Get(ctx, fmt.Sprintf("rook-ceph-mds-%s-a", fs.Name), metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, fmt.Sprintf("rook-ceph-mds-%s-a", fs.Name), r.Name) - - r, err = context.Clientset.AppsV1().Deployments(fs.Namespace).Get(ctx, fmt.Sprintf("rook-ceph-mds-%s-b", fs.Name), metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, fmt.Sprintf("rook-ceph-mds-%s-b", fs.Name), r.Name) -} diff --git a/pkg/operator/ceph/file/health.go b/pkg/operator/ceph/file/health.go deleted file mode 100644 index 1f71b98fc..000000000 --- a/pkg/operator/ceph/file/health.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package file manages a CephFS filesystem and the required daemons. 
-package file - -import ( - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - defaultHealthCheckInterval = 1 * time.Minute -) - -type mirrorChecker struct { - context *clusterd.Context - interval time.Duration - client client.Client - clusterInfo *cephclient.ClusterInfo - namespacedName types.NamespacedName - fsSpec *cephv1.FilesystemSpec - fsName string -} - -// newMirrorChecker creates a new HealthChecker -func newMirrorChecker(context *clusterd.Context, client client.Client, clusterInfo *cephclient.ClusterInfo, namespacedName types.NamespacedName, fsSpec *cephv1.FilesystemSpec, fsName string) *mirrorChecker { - c := &mirrorChecker{ - context: context, - interval: defaultHealthCheckInterval, - clusterInfo: clusterInfo, - namespacedName: namespacedName, - client: client, - fsSpec: fsSpec, - fsName: fsName, - } - - // allow overriding the check interval - checkInterval := fsSpec.StatusCheck.Mirror.Interval - if checkInterval != nil { - logger.Infof("filesystem %q mirroring status check interval is %q", namespacedName.Name, checkInterval) - c.interval = checkInterval.Duration - } - - return c -} - -// checkMirroring periodically checks the health of the cluster -func (c *mirrorChecker) checkMirroring(stopCh chan struct{}) { - // check the mirroring health immediately before starting the loop - err := c.checkMirroringHealth() - if err != nil { - c.updateStatusMirroring(nil, nil, err.Error()) - logger.Debugf("failed to check filesystem mirroring status %q. %v", c.namespacedName.Name, err) - } - - for { - select { - case <-stopCh: - logger.Infof("stopping monitoring filesystem mirroring status %q", c.namespacedName.Name) - return - - case <-time.After(c.interval): - logger.Debugf("checking filesystem mirroring status %q", c.namespacedName.Name) - err := c.checkMirroringHealth() - if err != nil { - c.updateStatusMirroring(nil, nil, err.Error()) - logger.Debugf("failed to check filesystem %q mirroring status. %v", c.namespacedName.Name, err) - } - } - } -} - -func (c *mirrorChecker) checkMirroringHealth() error { - mirrorStatus, err := cephclient.GetFSMirrorDaemonStatus(c.context, c.clusterInfo, c.fsName) - if err != nil { - c.updateStatusMirroring(nil, nil, err.Error()) - return err - } - - var snapSchedStatus []cephv1.FilesystemSnapshotSchedulesSpec - if c.fsSpec.Mirroring.SnapShotScheduleEnabled() { - snapSchedStatus, err = cephclient.GetSnapshotScheduleStatus(c.context, c.clusterInfo, c.fsName) - if err != nil { - c.updateStatusMirroring(nil, nil, err.Error()) - return err - } - } - - // On success - c.updateStatusMirroring(mirrorStatus, snapSchedStatus, "") - - return nil -} diff --git a/pkg/operator/ceph/file/mds/config.go b/pkg/operator/ceph/file/mds/config.go deleted file mode 100644 index 13cf0a1b7..000000000 --- a/pkg/operator/ceph/file/mds/config.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
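The checkMirroring loop above is a standard stop-channel polling pattern: one immediate check, then periodic re-checks until the channel is closed. A minimal generic version of that loop, with hypothetical names and no Ceph calls:

package main

import (
	"fmt"
	"time"
)

// poll runs check once up front, then again on each interval tick, and
// returns when stopCh is closed (the shutdown path used on CR deletion).
func poll(interval time.Duration, stopCh <-chan struct{}, check func() error) {
	if err := check(); err != nil {
		fmt.Println("check failed:", err)
	}
	for {
		select {
		case <-stopCh:
			fmt.Println("stopping status monitoring")
			return
		case <-time.After(interval):
			if err := check(); err != nil {
				fmt.Println("check failed:", err)
			}
		}
	}
}

func main() {
	stop := make(chan struct{})
	go poll(10*time.Millisecond, stop, func() error { return nil })
	time.Sleep(50 * time.Millisecond)
	close(stop)
	time.Sleep(10 * time.Millisecond)
}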
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mds - -import ( - "context" - "fmt" - "strconv" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - keyringTemplate = ` -[mds.%s] -key = %s -caps mon = "allow profile mds" -caps osd = "allow *" -caps mds = "allow" -` -) - -func (c *Cluster) generateKeyring(m *mdsConfig) (string, error) { - ctx := context.TODO() - user := fmt.Sprintf("mds.%s", m.DaemonID) - access := []string{"osd", "allow *", "mds", "allow", "mon", "allow profile mds"} - - // At present - s := keyring.GetSecretStore(c.context, c.clusterInfo, c.ownerInfo) - - key, err := s.GenerateKey(user, access) - if err != nil { - return "", err - } - - // Delete legacy key store for upgrade from Rook v0.9.x to v1.0.x - err = c.context.Clientset.CoreV1().Secrets(c.fs.Namespace).Delete(ctx, m.ResourceName, metav1.DeleteOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debugf("legacy mds key %s is already removed", m.ResourceName) - } else { - logger.Warningf("legacy mds key %q could not be removed. %v", m.ResourceName, err) - } - } - - keyring := fmt.Sprintf(keyringTemplate, m.DaemonID, key) - return keyring, s.CreateOrUpdate(m.ResourceName, keyring) -} - -func (c *Cluster) setDefaultFlagsMonConfigStore(mdsID string) error { - monStore := config.GetMonStore(c.context, c.clusterInfo) - who := fmt.Sprintf("mds.%s", mdsID) - configOptions := make(map[string]string) - - // Set mds cache memory limit to the best appropriate value - if !c.fs.Spec.MetadataServer.Resources.Limits.Memory().IsZero() { - mdsCacheMemoryLimit := float64(c.fs.Spec.MetadataServer.Resources.Limits.Memory().Value()) * mdsCacheMemoryLimitFactor - configOptions["mds_cache_memory_limit"] = strconv.Itoa(int(mdsCacheMemoryLimit)) - } else if !c.fs.Spec.MetadataServer.Resources.Requests.Memory().IsZero() { - mdsCacheMemoryRequest := float64(c.fs.Spec.MetadataServer.Resources.Requests.Memory().Value()) * mdsCacheMemoryResourceFactor - configOptions["mds_cache_memory_limit"] = strconv.Itoa(int(mdsCacheMemoryRequest)) - } - - // Set mds_join_fs flag to force mds daemon to join a specific fs - if c.clusterInfo.CephVersion.IsAtLeastOctopus() { - configOptions["mds_join_fs"] = c.fs.Name - } - - for flag, val := range configOptions { - err := monStore.Set(who, flag, val) - if err != nil { - return errors.Wrapf(err, "failed to set %q to %q on %q", flag, val, who) - } - } - - return nil -} diff --git a/pkg/operator/ceph/file/mds/mds.go b/pkg/operator/ceph/file/mds/mds.go deleted file mode 100644 index 6baa9828d..000000000 --- a/pkg/operator/ceph/file/mds/mds.go +++ /dev/null @@ -1,404 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mds provides methods for managing a Ceph mds cluster. 
-package mds - -import ( - "context" - "fmt" - "strconv" - "strings" - "syscall" - "time" - - "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-mds") - -const ( - // AppName is the name of Rook's Ceph mds (File) sub-app - AppName = "rook-ceph-mds" - // timeout if mds is not ready for upgrade after some time - fsWaitForActiveTimeout = 3 * time.Minute - // minimum amount of memory in MB to run the pod - cephMdsPodMinimumMemory uint64 = 4096 -) - -// Cluster represents a Ceph mds cluster. -type Cluster struct { - clusterInfo *cephclient.ClusterInfo - context *clusterd.Context - clusterSpec *cephv1.ClusterSpec - fs cephv1.CephFilesystem - fsID string - ownerInfo *k8sutil.OwnerInfo - dataDirHostPath string -} - -type mdsConfig struct { - ResourceName string - DaemonID string - DataPathMap *config.DataPathMap // location to store data in container -} - -// NewCluster creates a Ceph mds cluster representation. -func NewCluster( - clusterInfo *cephclient.ClusterInfo, - context *clusterd.Context, - clusterSpec *cephv1.ClusterSpec, - fs cephv1.CephFilesystem, - fsdetails *cephclient.CephFilesystemDetails, - ownerInfo *k8sutil.OwnerInfo, - dataDirHostPath string, -) *Cluster { - return &Cluster{ - clusterInfo: clusterInfo, - context: context, - clusterSpec: clusterSpec, - fs: fs, - fsID: strconv.Itoa(fsdetails.ID), - ownerInfo: ownerInfo, - dataDirHostPath: dataDirHostPath, - } -} - -// UpdateDeploymentAndWait can be overridden for unit tests. Do not alter this for runtime operation. -var UpdateDeploymentAndWait = mon.UpdateCephDeploymentAndWait - -// Start starts or updates a Ceph mds cluster in Kubernetes. -func (c *Cluster) Start() error { - ctx := context.TODO() - // Validate pod's memory if specified - err := controller.CheckPodMemory(cephv1.ResourcesKeyMDS, c.fs.Spec.MetadataServer.Resources, cephMdsPodMinimumMemory) - if err != nil { - return errors.Wrap(err, "error checking pod memory") - } - - // If attempt was made to prepare daemons for upgrade, make sure that an attempt is made to - // bring fs state back to desired when this method returns with any error or success. 
- var fsPreparedForUpgrade = false - - // upgrading MDS cluster needs to set max_mds to 1 and stop all stand-by MDSes first - isUpgrade, err := c.isCephUpgrade() - if err != nil { - return errors.Wrapf(err, "failed to determine if MDS cluster for filesystem %q needs to be upgraded", c.fs.Name) - } - if isUpgrade { - fsPreparedForUpgrade = true - if err := c.upgradeMDS(); err != nil { - return errors.Wrapf(err, "failed to upgrade MDS cluster for filesystem %q", c.fs.Name) - } - logger.Infof("successfully upgraded MDS cluster for filesystem %q", c.fs.Name) - } - - defer func() { - if fsPreparedForUpgrade { - if err := finishedWithDaemonUpgrade(c.context, c.clusterInfo, c.fs); err != nil { - logger.Errorf("for filesystem %q, USER should make sure the Ceph fs max_mds property is set to %d. %v", - c.fs.Name, c.fs.Spec.MetadataServer.ActiveCount, err) - } - } - }() - - // Always create double the number of metadata servers to have standby mdses available - replicas := c.fs.Spec.MetadataServer.ActiveCount * 2 - - // keep list of deployments we want so unwanted ones can be deleted later - desiredDeployments := map[string]bool{} // improvised set - // Create/update deployments - for i := 0; i < int(replicas); i++ { - deployment, err := c.startDeployment(ctx, k8sutil.IndexToName(i)) - if err != nil { - return errors.Wrapf(err, "failed to start deployment for MDS %q for filesystem %q", k8sutil.IndexToName(i), c.fs.Name) - } - desiredDeployments[deployment] = true - } - - if err := c.scaleDownDeployments(replicas, c.fs.Spec.MetadataServer.ActiveCount, desiredDeployments, true); err != nil { - return errors.Wrap(err, "failed to scale down mds deployments") - } - - return nil -} - -func (c *Cluster) startDeployment(ctx context.Context, daemonLetterID string) (string, error) { - // Each mds is id'ed by <fsName>-<letterID> - daemonName := fmt.Sprintf("%s-%s", c.fs.Name, daemonLetterID) - // resource name is rook-ceph-mds-<fsName>-<letterID> - resourceName := fmt.Sprintf("%s-%s-%s", AppName, c.fs.Name, daemonLetterID) - - mdsConfig := &mdsConfig{ - ResourceName: resourceName, - DaemonID: daemonName, - DataPathMap: config.NewStatelessDaemonDataPathMap(config.MdsType, daemonName, c.fs.Namespace, c.dataDirHostPath), - } - - // create unique key for each mds saved to k8s secret - _, err := c.generateKeyring(mdsConfig) - if err != nil { - return "", errors.Wrapf(err, "failed to generate keyring for %q", resourceName) - } - - // Set the mds config flags - // Previously we were checking if the deployment was present, if not we would set the config flags - // Which means that we would only set the flag on newly created CephFilesystem CR - // Unfortunately, on upgrade we would not set the flags, which is not ideal for old clusters where we were not setting those flags - // The KV supports setting those flags even if the MDS is running - logger.Info("setting mds config flags") - err = c.setDefaultFlagsMonConfigStore(mdsConfig.DaemonID) - if err != nil { - // Getting EPERM typically happens when the flag may not be modified at runtime - // This is fine to ignore - code, ok := exec.ExitStatus(err) - if ok && code != int(syscall.EPERM) { - return "", errors.Wrap(err, "failed to set default mds config options") - } - } - - // start the deployment - d, err := c.makeDeployment(mdsConfig, c.fs.Namespace) - if err != nil { - return "", errors.Wrap(err, "failed to create deployment") - } - // Set owner ref to cephFilesystem object - err = c.ownerInfo.SetControllerReference(d) - if err != nil { - return "", errors.Wrapf(err, "failed to set owner reference for ceph
filesystem %q deployment", d.Name) - } - - // Set the deployment hash as an annotation - err = patch.DefaultAnnotator.SetLastAppliedAnnotation(d) - if err != nil { - return "", errors.Wrapf(err, "failed to set annotation for deployment %q", d.Name) - } - - _, createErr := c.context.Clientset.AppsV1().Deployments(c.fs.Namespace).Create(ctx, d, metav1.CreateOptions{}) - if createErr != nil { - if !kerrors.IsAlreadyExists(createErr) { - return "", errors.Wrapf(createErr, "failed to create mds deployment %s", mdsConfig.ResourceName) - } - logger.Infof("deployment for mds %q already exists. updating if needed", mdsConfig.ResourceName) - _, err = c.context.Clientset.AppsV1().Deployments(c.fs.Namespace).Get(ctx, d.Name, metav1.GetOptions{}) - if err != nil { - return "", errors.Wrapf(err, "failed to get existing mds deployment %q for update", d.Name) - } - } - - if createErr != nil && kerrors.IsAlreadyExists(createErr) { - if err = UpdateDeploymentAndWait(c.context, c.clusterInfo, d, config.MdsType, daemonLetterID, c.clusterSpec.SkipUpgradeChecks, c.clusterSpec.ContinueUpgradeAfterChecksEvenIfNotHealthy); err != nil { - return "", errors.Wrapf(err, "failed to update mds deployment %q", d.Name) - } - } - return d.GetName(), nil -} - -// isCephUpgrade determines whether the running mds version is older than the target image version -func (c *Cluster) isCephUpgrade() (bool, error) { - - allVersions, err := cephclient.GetAllCephDaemonVersions(c.context, c.clusterInfo) - if err != nil { - return false, err - } - for key := range allVersions.Mds { - currentVersion, err := cephver.ExtractCephVersion(key) - if err != nil { - return false, err - } - if cephver.IsSuperior(c.clusterInfo.CephVersion, *currentVersion) { - logger.Debugf("ceph version for MDS %q is %q and target version is %q", key, currentVersion, c.clusterInfo.CephVersion) - return true, err - } - } - - return false, nil -} - -func (c *Cluster) upgradeMDS() error { - - logger.Infof("upgrading MDS cluster for filesystem %q", c.fs.Name) - - // 1. set allow_standby_replay to false - if err := cephclient.AllowStandbyReplay(c.context, c.clusterInfo, c.fs.Name, false); err != nil { - return errors.Wrap(err, "failed to set allow_standby_replay to false") - } - - // In Pacific, standby-replay daemons are stopped automatically. Older versions of Ceph require us to stop these daemons manually. - if err := cephclient.FailAllStandbyReplayMDS(c.context, c.clusterInfo, c.fs.Name); err != nil { - return errors.Wrap(err, "failed to fail mds agent in up:standby-replay state") - } - - // 2. set max_mds to 1 - logger.Debug("start setting active mds count to 1") - if err := cephclient.SetNumMDSRanks(c.context, c.clusterInfo, c.fs.Name, 1); err != nil { - return errors.Wrapf(err, "failed setting active mds count to %d", 1) - } - - // 3. wait for the number of active ranks to be 1 - if err := cephclient.WaitForActiveRanks(c.context, c.clusterInfo, c.fs.Name, 1, false, fsWaitForActiveTimeout); err != nil { - return errors.Wrap(err, "failed waiting for active ranks to be 1") - } - - // 4.
stop standby daemons - daemonName, err := cephclient.GetMdsIdByRank(c.context, c.clusterInfo, c.fs.Name, 0) - if err != nil { - return errors.Wrap(err, "failed to get mds id from rank 0") - } - daemonNameTokens := strings.Split(daemonName, "-") - daemonLetterID := daemonNameTokens[len(daemonNameTokens)-1] - desiredDeployments := map[string]bool{ - fmt.Sprintf("%s-%s-%s", AppName, c.fs.Name, daemonLetterID): true, - } - logger.Debugf("stop mds other than %s", daemonName) - err = c.scaleDownDeployments(1, 1, desiredDeployments, false) - if err != nil { - return errors.Wrap(err, "failed to scale down deployments during upgrade") - } - logger.Debugf("waiting for all standbys gone") - if err := cephclient.WaitForNoStandbys(c.context, c.clusterInfo, 120*time.Second); err != nil { - return errors.Wrap(err, "failed to wait for stopping all standbys") - } - - // 5. upgrade current active deployment and wait for it come back - _, err = c.startDeployment(context.TODO(), daemonLetterID) - if err != nil { - return errors.Wrapf(err, "failed to upgrade mds %q", daemonName) - } - logger.Debugf("successfully started daemon %q", daemonName) - - // 6. all other MDS daemons will be updated and restarted by main MDS code path - - // 7. max_mds & allow_standby_replay will be reset in deferred function finishedWithDaemonUpgrade - - return nil -} - -func (c *Cluster) scaleDownDeployments(replicas int32, activeCount int32, desiredDeployments map[string]bool, delete bool) error { - // Remove extraneous mds deployments if they exist - deps, err := getMdsDeployments(c.context, c.fs.Namespace, c.fs.Name) - if err != nil { - return errors.Wrapf(err, - fmt.Sprintf("cannot verify the removal of extraneous mds deployments for filesystem %s. ", c.fs.Name)+ - fmt.Sprintf("USER should make sure that only deployments %+v exist which match the filesystem's label selector", desiredDeployments), - ) - } - if !(len(deps.Items) > int(replicas)) { - // It's possible to check if there are fewer deployments than desired here, but that's - // checked above, and if that condition exists here, it's likely the user's manual actions. - logger.Debugf("The number of mds deployments (%d) is not greater than the number desired (%d). no extraneous deployments to delete", - len(deps.Items), replicas) - return nil - } - errCount := 0 - for _, d := range deps.Items { - if _, ok := desiredDeployments[d.GetName()]; !ok { - // if the extraneous mdses are the only ones active, Ceph may experience fs downtime - // if deleting them too quickly; therefore, wait until number of active mdses is desired - if err := cephclient.WaitForActiveRanks(c.context, c.clusterInfo, c.fs.Name, activeCount, true, fsWaitForActiveTimeout); err != nil { - errCount++ - logger.Errorf( - "number of active mds ranks is not as desired. it is potentially unsafe to continue with extraneous mds deletion, so stopping. " + - fmt.Sprintf("USER should delete undesired mds daemons once filesystem %s is healthy. ", c.fs.Name) + - fmt.Sprintf("desired mds deployments for this filesystem are %+v", desiredDeployments) + - fmt.Sprintf(". %v", err), - ) - break // stop trying to delete daemons, but continue to reporting any errors below - } - - localdeployment := d - if !delete { - // stop mds daemon only by scaling deployment replicas to 0 - if err := scaleMdsDeployment(c.context, c.fs.Namespace, &localdeployment, 0); err != nil { - errCount++ - logger.Errorf("failed to scale mds deployment %q. 
%v", localdeployment.GetName(), err) - } - continue - } - if err := deleteMdsDeployment(c.context, c.fs.Namespace, &localdeployment); err != nil { - errCount++ - logger.Errorf("failed to delete mds deployment. %v", err) - } - - daemonName := strings.Replace(d.GetName(), fmt.Sprintf("%s-", AppName), "", -1) - err := c.DeleteMdsCephObjects(daemonName) - if err != nil { - logger.Errorf("%v", err) - } - } - } - if errCount > 0 { - return errors.Wrapf(err, "%d error(s) during deletion of extraneous mds deployments, see logs above", errCount) - } - deletedOrStopped := "deleted" - if !delete { - deletedOrStopped = "stopped" - } - logger.Infof("successfully %s unwanted MDS deployments", deletedOrStopped) - - return nil -} - -func (c *Cluster) DeleteMdsCephObjects(mdsID string) error { - monStore := config.GetMonStore(c.context, c.clusterInfo) - who := fmt.Sprintf("mds.%s", mdsID) - err := monStore.DeleteDaemon(who) - if err != nil { - return errors.Wrapf(err, "failed to delete mds config for %q in mon configuration database", who) - } - logger.Infof("successfully deleted mds config for %q in mon configuration database", who) - - err = cephclient.AuthDelete(c.context, c.clusterInfo, who) - if err != nil { - return err - } - logger.Infof("successfully deleted mds CephX key for %q", who) - return nil -} - -// finishedWithDaemonUpgrade performs all actions necessary to bring the filesystem back to its -// ideal state following an upgrade of its daemon(s). -func finishedWithDaemonUpgrade(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, fs cephv1.CephFilesystem) error { - fsName := fs.Name - activeMDSCount := fs.Spec.MetadataServer.ActiveCount - logger.Debugf("restoring filesystem %s from daemon upgrade", fsName) - logger.Debugf("bringing num active MDS daemons for fs %s back to %d", fsName, activeMDSCount) - // TODO: Unknown (Apr 2020) if this can be removed once Rook no longer supports Nautilus. - // upgrade guide according to nautilus https://docs.ceph.com/docs/nautilus/cephfs/upgrading/#upgrading-the-mds-cluster - if err := cephclient.SetNumMDSRanks(context, clusterInfo, fsName, activeMDSCount); err != nil { - return errors.Wrapf(err, "Failed to restore filesystem %s following daemon upgrade", fsName) - } - - // set allow_standby_replay back - if err := cephclient.AllowStandbyReplay(context, clusterInfo, fsName, fs.Spec.MetadataServer.ActiveStandby); err != nil { - return errors.Wrap(err, "failed to set allow_standby_replay to true") - } - - return nil -} diff --git a/pkg/operator/ceph/file/mds/spec.go b/pkg/operator/ceph/file/mds/spec.go deleted file mode 100644 index 56d146660..000000000 --- a/pkg/operator/ceph/file/mds/spec.go +++ /dev/null @@ -1,204 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mds - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - podIPEnvVar = "ROOK_POD_IP" - // MDS cache memory limit should be set to 50-60% of RAM reserved for the MDS container - // MDS uses approximately 125% of the value of mds_cache_memory_limit in RAM. - // Eventually we will tune this automatically: http://tracker.ceph.com/issues/36663 - mdsCacheMemoryLimitFactor = 0.5 - mdsCacheMemoryResourceFactor = 0.8 -) - -func (c *Cluster) makeDeployment(mdsConfig *mdsConfig, namespace string) (*apps.Deployment, error) { - - mdsContainer := c.makeMdsDaemonContainer(mdsConfig) - mdsContainer = config.ConfigureLivenessProbe(cephv1.KeyMds, mdsContainer, c.clusterSpec.HealthCheck) - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: mdsConfig.ResourceName, - Namespace: namespace, - Labels: c.podLabels(mdsConfig, true), - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - c.makeChownInitContainer(mdsConfig), - }, - Containers: []v1.Container{ - mdsContainer, - }, - RestartPolicy: v1.RestartPolicyAlways, - Volumes: controller.DaemonVolumes(mdsConfig.DataPathMap, mdsConfig.ResourceName), - HostNetwork: c.clusterSpec.Network.IsHost(), - PriorityClassName: c.fs.Spec.MetadataServer.PriorityClassName, - }, - } - - // Replace default unreachable node toleration - k8sutil.AddUnreachableNodeToleration(&podSpec.Spec) - - // If the log collector is enabled we add the side-car container - if c.clusterSpec.LogCollector.Enabled { - shareProcessNamespace := true - podSpec.Spec.ShareProcessNamespace = &shareProcessNamespace - podSpec.Spec.Containers = append(podSpec.Spec.Containers, *controller.LogCollectorContainer(fmt.Sprintf("ceph-mds.%s", mdsConfig.DaemonID), c.clusterInfo.Namespace, *c.clusterSpec)) - } - - c.fs.Spec.MetadataServer.Annotations.ApplyToObjectMeta(&podSpec.ObjectMeta) - c.fs.Spec.MetadataServer.Labels.ApplyToObjectMeta(&podSpec.ObjectMeta) - c.fs.Spec.MetadataServer.Placement.ApplyToPodSpec(&podSpec.Spec) - - replicas := int32(1) - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: mdsConfig.ResourceName, - Namespace: c.fs.Namespace, - Labels: c.podLabels(mdsConfig, true), - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: c.podLabels(mdsConfig, false), - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - - if c.clusterSpec.Network.IsHost() { - d.Spec.Template.Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } else if c.clusterSpec.Network.IsMultus() { - if err := k8sutil.ApplyMultus(c.clusterSpec.Network, &podSpec.ObjectMeta); err != nil { - return nil, err - } - } - - k8sutil.AddRookVersionLabelToDeployment(d) - c.fs.Spec.MetadataServer.Annotations.ApplyToObjectMeta(&d.ObjectMeta) - c.fs.Spec.MetadataServer.Labels.ApplyToObjectMeta(&d.ObjectMeta) - controller.AddCephVersionLabelToDeployment(c.clusterInfo.CephVersion, d) - - return d, nil -} - -func (c *Cluster) makeChownInitContainer(mdsConfig *mdsConfig) v1.Container { - return controller.ChownCephDataDirsInitContainer( - *mdsConfig.DataPathMap, - 
c.clusterSpec.CephVersion.Image, - controller.DaemonVolumeMounts(mdsConfig.DataPathMap, mdsConfig.ResourceName), - c.fs.Spec.MetadataServer.Resources, - controller.PodSecurityContext(), - ) -} - -func (c *Cluster) makeMdsDaemonContainer(mdsConfig *mdsConfig) v1.Container { - args := append( - controller.DaemonFlags(c.clusterInfo, c.clusterSpec, mdsConfig.DaemonID), - "--foreground", - ) - - if !c.clusterSpec.Network.IsHost() { - args = append(args, - config.NewFlag("public-addr", controller.ContainerEnvVarReference(podIPEnvVar))) - } - - container := v1.Container{ - Name: "mds", - Command: []string{ - "ceph-mds", - }, - Args: args, - Image: c.clusterSpec.CephVersion.Image, - VolumeMounts: controller.DaemonVolumeMounts(mdsConfig.DataPathMap, mdsConfig.ResourceName), - Env: append(controller.DaemonEnvVars(c.clusterSpec.CephVersion.Image), k8sutil.PodIPEnvVar(podIPEnvVar)), - Resources: c.fs.Spec.MetadataServer.Resources, - SecurityContext: controller.PodSecurityContext(), - LivenessProbe: controller.GenerateLivenessProbeExecDaemon(config.MdsType, mdsConfig.DaemonID), - WorkingDir: config.VarLogCephDir, - } - - return container -} - -func (c *Cluster) podLabels(mdsConfig *mdsConfig, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, c.fs.Namespace, "mds", mdsConfig.DaemonID, includeNewLabels) - labels["rook_file_system"] = c.fs.Name - return labels -} - -func getMdsDeployments(context *clusterd.Context, namespace, fsName string) (*apps.DeploymentList, error) { - fsLabelSelector := fmt.Sprintf("rook_file_system=%s", fsName) - deps, err := k8sutil.GetDeployments(context.Clientset, namespace, fsLabelSelector) - if err != nil { - return nil, errors.Wrapf(err, "could not get deployments for filesystem %s (matching label selector %q)", fsName, fsLabelSelector) - } - return deps, nil -} - -func deleteMdsDeployment(clusterdContext *clusterd.Context, namespace string, deployment *apps.Deployment) error { - ctx := context.TODO() - // Delete the mds deployment - logger.Infof("deleting mds deployment %s", deployment.Name) - var gracePeriod int64 - propagation := metav1.DeletePropagationForeground - options := &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod, PropagationPolicy: &propagation} - if err := clusterdContext.Clientset.AppsV1().Deployments(namespace).Delete(ctx, deployment.GetName(), *options); err != nil { - return errors.Wrapf(err, "failed to delete mds deployment %s", deployment.GetName()) - } - return nil -} - -func scaleMdsDeployment(clusterdContext *clusterd.Context, namespace string, deployment *apps.Deployment, replicas int32) error { - ctx := context.TODO() - // scale mds deployment - logger.Infof("scaling mds deployment %q to %d replicas", deployment.Name, replicas) - d, err := clusterdContext.Clientset.AppsV1().Deployments(namespace).Get(ctx, deployment.GetName(), metav1.GetOptions{}) - if err != nil { - if replicas != 0 && kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to scale mds deployment %q to %d", deployment.GetName(), replicas) - } - } - // replicas already met requirement - if *d.Spec.Replicas == replicas { - return nil - } - *d.Spec.Replicas = replicas - if _, err := clusterdContext.Clientset.AppsV1().Deployments(namespace).Update(ctx, d, metav1.UpdateOptions{}); err != nil { - return errors.Wrapf(err, "failed to scale mds deployment %s to %d replicas", deployment.GetName(), replicas) - } - return nil -} diff --git a/pkg/operator/ceph/file/mds/spec_test.go b/pkg/operator/ceph/file/mds/spec_test.go deleted file 
mode 100644 index d6c9d53e4..000000000 --- a/pkg/operator/ceph/file/mds/spec_test.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mds - -import ( - "testing" - - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/test" - "github.com/rook/rook/pkg/operator/k8sutil" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - - testop "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func testDeploymentObject(t *testing.T, network cephv1.NetworkSpec) (*apps.Deployment, error) { - fs := cephv1.CephFilesystem{ - ObjectMeta: metav1.ObjectMeta{Name: "myfs", Namespace: "ns"}, - Spec: cephv1.FilesystemSpec{ - MetadataServer: cephv1.MetadataServerSpec{ - ActiveCount: 1, - ActiveStandby: false, - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(500.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(4337.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(250.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(2169.0, resource.BinarySI), - }, - }, - PriorityClassName: "my-priority-class", - }, - }, - } - clusterInfo := &cephclient.ClusterInfo{ - FSID: "myfsid", - CephVersion: cephver.Nautilus, - } - clientset := testop.New(t, 1) - - c := NewCluster( - clusterInfo, - &clusterd.Context{Clientset: clientset}, - &cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:testversion"}, - Network: network, - }, - fs, - &cephclient.CephFilesystemDetails{ID: 15}, - &k8sutil.OwnerInfo{}, - "/var/lib/rook/", - ) - mdsTestConfig := &mdsConfig{ - DaemonID: "myfs-a", - ResourceName: "rook-ceph-mds-myfs-a", - DataPathMap: config.NewStatelessDaemonDataPathMap(config.MdsType, "myfs-a", "rook-ceph", "/var/lib/rook/"), - } - return c.makeDeployment(mdsTestConfig, "ns") -} - -func TestPodSpecs(t *testing.T) { - d, err := testDeploymentObject(t, cephv1.NetworkSpec{HostNetwork: false}) // no host network - assert.Nil(t, err) - - assert.NotNil(t, d) - assert.Equal(t, v1.RestartPolicyAlways, d.Spec.Template.Spec.RestartPolicy) - - // Deployment should have Ceph labels - test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MdsType, "myfs-a", "rook-ceph-mds", "ns") - - podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) - podTemplate.RunFullSuite(config.MdsType, "myfs-a", "rook-ceph-mds", "ns", "quay.io/ceph/ceph:testversion", - "500", "250", "4337", "2169", /* resources */ - "my-priority-class") - - // assert --public-addr is appended 
to args - assert.Contains(t, d.Spec.Template.Spec.Containers[0].Args, - config.NewFlag("public-addr", controller.ContainerEnvVarReference(podIPEnvVar))) -} - -func TestHostNetwork(t *testing.T) { - d, err := testDeploymentObject(t, cephv1.NetworkSpec{HostNetwork: true}) // host network - assert.Nil(t, err) - - assert.Equal(t, true, d.Spec.Template.Spec.HostNetwork) - assert.Equal(t, v1.DNSClusterFirstWithHostNet, d.Spec.Template.Spec.DNSPolicy) - - // assert --public-addr is not appended to args - assert.NotContains(t, d.Spec.Template.Spec.Containers[0].Args, - config.NewFlag("public-addr", controller.ContainerEnvVarReference(podIPEnvVar))) -} diff --git a/pkg/operator/ceph/file/mirror/config.go b/pkg/operator/ceph/file/mirror/config.go deleted file mode 100644 index 2edc5bdf8..000000000 --- a/pkg/operator/ceph/file/mirror/config.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mirror - -import ( - "fmt" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" -) - -const ( - keyringTemplate = ` -[client.fs-mirror] - key = %s - caps mon = "allow profile cephfs-mirror" - caps mgr = "allow r" - caps mds = "allow r" - caps osd = "'allow rw tag cephfs metadata=*, allow r tag cephfs data=*'" -` - user = "client.fs-mirror" - userID = "fs-mirror" -) - -var ( - // PeerAdditionMinVersion This version includes a number of fixes for snapshots and mirror status - PeerAdditionMinVersion = version.CephVersion{Major: 16, Minor: 2, Extra: 5} -) - -// daemonConfig for a single rbd-mirror -type daemonConfig struct { - ResourceName string // the name rook gives to mirror resources in k8s metadata - DataPathMap *config.DataPathMap // location to store data in container - ownerInfo *k8sutil.OwnerInfo -} - -func (r *ReconcileFilesystemMirror) generateKeyring(clusterInfo *client.ClusterInfo, daemonConfig *daemonConfig) (string, error) { - access := []string{ - "mon", "allow profile cephfs-mirror", - "mgr", "allow r", - "mds", "allow r", - "osd", "allow rw tag cephfs metadata=*, allow r tag cephfs data=*", - } - s := keyring.GetSecretStore(r.context, clusterInfo, daemonConfig.ownerInfo) - - key, err := s.GenerateKey(user, access) - if err != nil { - return "", err - } - - keyring := fmt.Sprintf(keyringTemplate, key) - return keyring, s.CreateOrUpdate(daemonConfig.ResourceName, keyring) -} diff --git a/pkg/operator/ceph/file/mirror/controller.go b/pkg/operator/ceph/file/mirror/controller.go deleted file mode 100644 index 0f474959c..000000000 --- a/pkg/operator/ceph/file/mirror/controller.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mirror - -import ( - "context" - "fmt" - "reflect" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - controllerName = "ceph-filesystem-mirror-controller" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -// List of object resources to watch by the controller -var objectsToWatch = []client.Object{ - &v1.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: v1.SchemeGroupVersion.String()}}, - &v1.Secret{TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: v1.SchemeGroupVersion.String()}}, - &appsv1.Deployment{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.String()}}, -} - -var cephFilesystemMirrorKind = reflect.TypeOf(cephv1.CephFilesystemMirror{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephFilesystemMirrorKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileFilesystemMirror reconciles a CephFilesystemMirror object -type ReconcileFilesystemMirror struct { - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - client client.Client - scheme *runtime.Scheme - cephClusterSpec *cephv1.ClusterSpec -} - -// Add creates a new CephFilesystemMirror Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - return &ReconcileFilesystemMirror{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephFilesystemMirror CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephFilesystemMirror{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - // Watch all other resources - for _, t := range objectsToWatch { - err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephFilesystemMirror{}, - }, opcontroller.WatchPredicateForNonCRDObject(&cephv1.CephFilesystemMirror{TypeMeta: controllerTypeMeta}, mgr.GetScheme())) - if err != nil { - return err - } - } - - // Build Handler function to return the list of ceph object - // This is used by the watchers below - handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephFilesystemMirrorList{}, mgr.GetScheme()) - if err != nil { - return err - } - - // Watch for CephCluster Spec changes that we want to propagate to us - err = c.Watch(&source.Kind{Type: &cephv1.CephCluster{ - TypeMeta: metav1.TypeMeta{ - Kind: opcontroller.ClusterResource.Kind, - APIVersion: opcontroller.ClusterResource.APIVersion, - }, - }, - }, handler.EnqueueRequestsFromMapFunc(handlerFunc), opcontroller.WatchCephClusterPredicate()) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephFilesystemMirror object and makes changes based on the state read -// and what is in the CephFilesystemMirror.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileFilesystemMirror) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.FailedStatus) - logger.Errorf("failed to reconcile %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileFilesystemMirror) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the CephFilesystemMirror instance - filesystemMirror := &cephv1.CephFilesystemMirror{} - err := r.client.Get(context.TODO(), request.NamespacedName, filesystemMirror) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephFilesystemMirror resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. 
- return reconcile.Result{}, errors.Wrap(err, "failed to get CephFilesystemMirror") - } - - // The CR was just created, initializing status fields - if filesystemMirror.Status == nil { - updateStatus(r.client, request.NamespacedName, k8sutil.EmptyStatus) - } - - // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - logger.Debugf("CephCluster resource not ready in namespace %q, retrying in %q.", request.NamespacedName.Namespace, reconcileResponse.RequeueAfter.String()) - return reconcileResponse, nil - } - - // Assign the clusterSpec - r.cephClusterSpec = &cephCluster.Spec - - // Populate clusterInfo - r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to populate cluster info") - } - - // Populate CephVersion - daemon := string(opconfig.MonType) - currentCephVersion, err := cephclient.LeastUptodateDaemonVersion(r.context, r.clusterInfo, daemon) - if err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil - } - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to retrieve current ceph %q version", daemon) - } - r.clusterInfo.CephVersion = currentCephVersion - - // Validate Ceph version - if !currentCephVersion.IsAtLeastPacific() { - return opcontroller.ImmediateRetryResult, errors.Errorf("ceph pacific version is required to deploy cephfs mirroring, current cluster runs %q", currentCephVersion.String()) - } - - // CREATE/UPDATE - logger.Debug("reconciling ceph filesystem mirror deployments") - reconcileResponse, err = r.reconcileFilesystemMirror(filesystemMirror) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to create ceph filesystem mirror deployments") - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, k8sutil.ReadyStatus) - - // Return and do not requeue - logger.Debug("done reconciling ceph filesystem mirror") - return reconcile.Result{}, nil - -} - -func (r *ReconcileFilesystemMirror) reconcileFilesystemMirror(filesystemMirror *cephv1.CephFilesystemMirror) (reconcile.Result, error) { - if r.cephClusterSpec.External.Enable { - _, err := opcontroller.ValidateCephVersionsBetweenLocalAndExternalClusters(r.context, r.clusterInfo) - if err != nil { - // This handles the case where the operator is running, the external cluster has been upgraded and a CR creation is called - // If that's a major version upgrade we fail, if it's a minor version, we continue, it's not ideal but not critical - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "refusing to run new crd") - } - } - - err := r.start(filesystemMirror) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to start filesystem mirror") - } - - return reconcile.Result{}, nil -} - -// updateStatus updates an object with a given status -func updateStatus(client client.Client, name types.NamespacedName, status string) { - fsMirror := &cephv1.CephFilesystemMirror{} - err := client.Get(context.TODO(), name, fsMirror) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephFilesystemMirror resource not found. 
Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve filesystem mirror %q to update status to %q. %v", name, status, err) - return - } - - if fsMirror.Status == nil { - fsMirror.Status = &cephv1.Status{} - } - - fsMirror.Status.Phase = status - if err := reporting.UpdateStatus(client, fsMirror); err != nil { - logger.Errorf("failed to set filesystem mirror %q status to %q. %v", fsMirror.Name, status, err) - return - } - logger.Debugf("filesystem mirror %q status updated to %q", name, status) -} diff --git a/pkg/operator/ceph/file/mirror/controller_test.go b/pkg/operator/ceph/file/mirror/controller_test.go deleted file mode 100644 index f7821faf4..000000000 --- a/pkg/operator/ceph/file/mirror/controller_test.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mirror to manage a rook filesystem -package mirror - -import ( - "context" - "os" - "testing" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - cephAuthGetOrCreateKey = `{"key":"AQCvzWBeIV9lFRAAninzm+8XFxbSfTiPwoX50g=="}` - dummyVersionsRaw = ` - { - "mon": { - "ceph version 14.2.8 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 3 - } - }` - pacificVersionsRaw = ` - { - "mon": { - "ceph version 16.2.1 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) pacific (stable)": 3 - } - }` -) - -func TestCephFilesystemMirrorController(t *testing.T) { - ctx := context.TODO() - var ( - name = "my-fs-mirror" - namespace = "rook-ceph" - ) - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - // An cephfs-mirror resource with metadata and spec. - fsMirror := &cephv1.CephFilesystemMirror{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.FilesystemMirroringSpec{}, - TypeMeta: controllerTypeMeta, - } - - // Objects to track in the fake client. 
- object := []runtime.Object{ - fsMirror, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return cephAuthGetOrCreateKey, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - return "", nil - }, - } - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephFilesystemMirror{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileFilesystemMirror object with the scheme and fake client. - r := &ReconcileFilesystemMirror{client: cl, scheme: s, context: c} - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - logger.Info("STARTING PHASE 1") - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 1 DONE") - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileFilesystemMirror object with the scheme and fake client. - r = &ReconcileFilesystemMirror{client: cl, scheme: s, context: c} - logger.Info("STARTING PHASE 2") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 2 DONE") - - // - // TEST 3: - // - // SUCCESS! The CephCluster is ready but version is too old! - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileFilesystemMirror object with the scheme and fake client. - r = &ReconcileFilesystemMirror{ - client: cl, - scheme: s, - context: c, - } - res, err = r.Reconcile(ctx, req) - assert.Error(t, err) - assert.True(t, res.Requeue) - - // - // TEST 4: - // - // SUCCESS! The CephCluster is ready and running Ceph Pacific! 
- // - r.context.Executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return cephAuthGetOrCreateKey, nil - } - if args[0] == "versions" { - return pacificVersionsRaw, nil - } - return "", nil - }, - } - - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, fsMirror) - assert.NoError(t, err) - assert.Equal(t, "Ready", fsMirror.Status.Phase, fsMirror) -} diff --git a/pkg/operator/ceph/file/mirror/mirror.go b/pkg/operator/ceph/file/mirror/mirror.go deleted file mode 100644 index a804e2e7e..000000000 --- a/pkg/operator/ceph/file/mirror/mirror.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mirror for mirroring -package mirror - -import ( - "context" - - "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -const ( - // AppName is the ceph filesystem mirror application name - AppName = "rook-ceph-fs-mirror" - // minimum amount of memory in MB to run the pod - cephFilesystemMirrorPodMinimumMemory uint64 = 512 -) - -var updateDeploymentAndWait = mon.UpdateCephDeploymentAndWait - -// Start begins the process of running filesystem mirroring daemons. 
-func (r *ReconcileFilesystemMirror) start(filesystemMirror *cephv1.CephFilesystemMirror) error { - ctx := context.TODO() - // Validate pod's memory if specified - err := controller.CheckPodMemory(cephv1.ResourcesKeyFilesystemMirror, filesystemMirror.Spec.Resources, cephFilesystemMirrorPodMinimumMemory) - if err != nil { - return errors.Wrap(err, "error checking pod memory") - } - - ownerInfo := k8sutil.NewOwnerInfo(filesystemMirror, r.scheme) - daemonConf := &daemonConfig{ - ResourceName: AppName, - DataPathMap: config.NewDatalessDaemonDataPathMap(filesystemMirror.Namespace, r.cephClusterSpec.DataDirHostPath), - ownerInfo: ownerInfo, - } - - _, err = r.generateKeyring(r.clusterInfo, daemonConf) - if err != nil { - return errors.Wrapf(err, "failed to generate keyring for %q", AppName) - } - - // Start the deployment - d, err := r.makeDeployment(daemonConf, filesystemMirror) - if err != nil { - return errors.Wrap(err, "failed to create filesystem-mirror deployment") - } - - // Set owner ref to filesystemMirror object - err = controllerutil.SetControllerReference(filesystemMirror, d, r.scheme) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference for ceph filesystem-mirror deployment %q", d.Name) - } - - // Set the deployment hash as an annotation - err = patch.DefaultAnnotator.SetLastAppliedAnnotation(d) - if err != nil { - return errors.Wrapf(err, "failed to set annotation for deployment %q", d.Name) - } - - if _, err := r.context.Clientset.AppsV1().Deployments(filesystemMirror.Namespace).Create(ctx, d, metav1.CreateOptions{}); err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create %q deployment", d.Name) - } - logger.Infof("deployment for filesystem-mirror %q already exists. updating if needed", d.Name) - - if err := updateDeploymentAndWait(r.context, r.clusterInfo, d, config.FilesystemMirrorType, AppName, r.cephClusterSpec.SkipUpgradeChecks, false); err != nil { - // fail could be an issue updating label selector (immutable), so try del and recreate - logger.Debugf("updateDeploymentAndWait failed for filesystem-mirror %q. Attempting del-and-recreate. %v", d.Name, err) - err = r.context.Clientset.AppsV1().Deployments(filesystemMirror.Namespace).Delete(ctx, filesystemMirror.Name, metav1.DeleteOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to delete filesystem-mirror deployment %q during del-and-recreate update attempt", d.Name) - } - if _, err := r.context.Clientset.AppsV1().Deployments(filesystemMirror.Namespace).Create(ctx, d, metav1.CreateOptions{}); err != nil { - return errors.Wrapf(err, "failed to recreate filesystem-mirror deployment %q during del-and-recreate update attempt", d.Name) - } - } - } - - logger.Infof("%q deployment started", AppName) - - return nil -} diff --git a/pkg/operator/ceph/file/mirror/spec.go b/pkg/operator/ceph/file/mirror/spec.go deleted file mode 100644 index b3bcccd0e..000000000 --- a/pkg/operator/ceph/file/mirror/spec.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mirror - -import ( - "fmt" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (r *ReconcileFilesystemMirror) makeDeployment(daemonConfig *daemonConfig, fsMirror *cephv1.CephFilesystemMirror) (*apps.Deployment, error) { - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: daemonConfig.ResourceName, - Namespace: fsMirror.Namespace, - Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, true), - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - r.makeChownInitContainer(daemonConfig, fsMirror), - }, - Containers: []v1.Container{ - r.makeFsMirroringDaemonContainer(daemonConfig, fsMirror), - }, - RestartPolicy: v1.RestartPolicyAlways, - Volumes: controller.DaemonVolumes(daemonConfig.DataPathMap, daemonConfig.ResourceName), - HostNetwork: r.cephClusterSpec.Network.IsHost(), - PriorityClassName: fsMirror.Spec.PriorityClassName, - }, - } - - // If the log collector is enabled we add the side-car container - if r.cephClusterSpec.LogCollector.Enabled { - shareProcessNamespace := true - podSpec.Spec.ShareProcessNamespace = &shareProcessNamespace - podSpec.Spec.Containers = append(podSpec.Spec.Containers, *controller.LogCollectorContainer(fmt.Sprintf("ceph-%s", user), r.clusterInfo.Namespace, *r.cephClusterSpec)) - } - - // Replace default unreachable node toleration - k8sutil.AddUnreachableNodeToleration(&podSpec.Spec) - fsMirror.Spec.Annotations.ApplyToObjectMeta(&podSpec.ObjectMeta) - fsMirror.Spec.Labels.ApplyToObjectMeta(&podSpec.ObjectMeta) - - if r.cephClusterSpec.Network.IsHost() { - podSpec.Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } else if r.cephClusterSpec.Network.IsMultus() { - if err := k8sutil.ApplyMultus(r.cephClusterSpec.Network, &podSpec.ObjectMeta); err != nil { - return nil, err - } - } - fsMirror.Spec.Placement.ApplyToPodSpec(&podSpec.Spec) - - replicas := int32(1) - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: daemonConfig.ResourceName, - Namespace: fsMirror.Namespace, - Annotations: fsMirror.Spec.Annotations, - Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, true), - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: podSpec.Labels, - }, - Template: podSpec, - Replicas: &replicas, - }, - } - k8sutil.AddRookVersionLabelToDeployment(d) - controller.AddCephVersionLabelToDeployment(r.clusterInfo.CephVersion, d) - fsMirror.Spec.Annotations.ApplyToObjectMeta(&d.ObjectMeta) - fsMirror.Spec.Labels.ApplyToObjectMeta(&d.ObjectMeta) - - return d, nil -} - -func (r *ReconcileFilesystemMirror) makeChownInitContainer(daemonConfig *daemonConfig, fsMirror *cephv1.CephFilesystemMirror) v1.Container { - return controller.ChownCephDataDirsInitContainer( - *daemonConfig.DataPathMap, - r.cephClusterSpec.CephVersion.Image, - controller.DaemonVolumeMounts(daemonConfig.DataPathMap, daemonConfig.ResourceName), - fsMirror.Spec.Resources, - controller.PodSecurityContext(), - ) -} - -func (r *ReconcileFilesystemMirror) makeFsMirroringDaemonContainer(daemonConfig *daemonConfig, fsMirror *cephv1.CephFilesystemMirror) v1.Container { - 
container := v1.Container{ - Name: "fs-mirror", - Command: []string{ - "cephfs-mirror", - }, - Args: append( - controller.DaemonFlags(r.clusterInfo, r.cephClusterSpec, userID), - "--foreground", - "--name="+user, - ), - Image: r.cephClusterSpec.CephVersion.Image, - VolumeMounts: controller.DaemonVolumeMounts(daemonConfig.DataPathMap, daemonConfig.ResourceName), - Env: controller.DaemonEnvVars(r.cephClusterSpec.CephVersion.Image), - Resources: fsMirror.Spec.Resources, - SecurityContext: controller.PodSecurityContext(), - // TODO: - // LivenessProbe: controller.GenerateLivenessProbeExecDaemon(config.fsMirrorType, daemonConfig.DaemonID), - } - - return container -} diff --git a/pkg/operator/ceph/file/mirror/spec_test.go b/pkg/operator/ceph/file/mirror/spec_test.go deleted file mode 100644 index 8a950d792..000000000 --- a/pkg/operator/ceph/file/mirror/spec_test.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mirror - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/test" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestPodSpec(t *testing.T) { - namespace := "ns" - daemonConf := daemonConfig{ - ResourceName: "rook-ceph-fs-mirror", - DataPathMap: config.NewDatalessDaemonDataPathMap("rook-ceph", "/var/lib/rook"), - } - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Spec: cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{ - Image: "quay.io/ceph/ceph:v16", - }, - }, - } - - fsMirror := &cephv1.CephFilesystemMirror{ - ObjectMeta: metav1.ObjectMeta{ - Name: userID, - Namespace: namespace, - }, - Spec: cephv1.FilesystemMirroringSpec{ - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(200.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(600.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(300.0, resource.BinarySI), - }, - }, - PriorityClassName: "my-priority-class", - }, - TypeMeta: controllerTypeMeta, - } - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.Nautilus, - } - s := scheme.Scheme - object := []runtime.Object{fsMirror} - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - r := &ReconcileFilesystemMirror{client: cl, scheme: s} - r.cephClusterSpec = &cephCluster.Spec - r.clusterInfo = clusterInfo - - 
d, err := r.makeDeployment(&daemonConf, fsMirror) - assert.NoError(t, err) - assert.Equal(t, "rook-ceph-fs-mirror", d.Name) - assert.Equal(t, 4, len(d.Spec.Template.Spec.Volumes)) - assert.Equal(t, 1, len(d.Spec.Template.Spec.Volumes[0].Projected.Sources)) - assert.Equal(t, 4, len(d.Spec.Template.Spec.Containers[0].VolumeMounts)) - - // Deployment should have Ceph labels - test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.FilesystemMirrorType, userID, AppName, "ns") - - podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) - podTemplate.RunFullSuite(config.FilesystemMirrorType, userID, AppName, "ns", "quay.io/ceph/ceph:v16", - "200", "100", "600", "300", /* resources */ - "my-priority-class") -} diff --git a/pkg/operator/ceph/file/status.go b/pkg/operator/ceph/file/status.go deleted file mode 100644 index 7f05afef5..000000000 --- a/pkg/operator/ceph/file/status.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package file manages a CephFS filesystem and the required daemons. -package file - -import ( - "context" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/reporting" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// updateStatus updates a fs CR with the given status -func updateStatus(client client.Client, namespacedName types.NamespacedName, status cephv1.ConditionType, info map[string]string) { - fs := &cephv1.CephFilesystem{} - err := client.Get(context.TODO(), namespacedName, fs) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephFilesystem resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve filesystem %q to update status to %q. %v", namespacedName, status, err) - return - } - - if fs.Status == nil { - fs.Status = &cephv1.CephFilesystemStatus{} - } - - fs.Status.Phase = status - fs.Status.Info = info - if err := reporting.UpdateStatus(client, fs); err != nil { - logger.Warningf("failed to set filesystem %q status to %q. %v", fs.Name, status, err) - return - } - logger.Debugf("filesystem %q status updated to %q", fs.Name, status) -} - -// updateStatusBucket updates an object with a given status -func (c *mirrorChecker) updateStatusMirroring(mirrorStatus []cephv1.FilesystemMirroringInfo, snapSchedStatus []cephv1.FilesystemSnapshotSchedulesSpec, details string) { - fs := &cephv1.CephFilesystem{} - if err := c.client.Get(context.TODO(), c.namespacedName, fs); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephFilesystem resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve ceph filesystem %q to update mirroring status. 
%v", c.namespacedName.Name, err) - return - } - if fs.Status == nil { - fs.Status = &cephv1.CephFilesystemStatus{} - } - - // Update the CephFilesystem CR status field - fs.Status = toCustomResourceStatus(fs.Status, mirrorStatus, snapSchedStatus, details) - if err := reporting.UpdateStatus(c.client, fs); err != nil { - logger.Errorf("failed to set ceph filesystem %q mirroring status. %v", c.namespacedName.Name, err) - return - } - - logger.Debugf("ceph filesystem %q mirroring status updated", c.namespacedName.Name) -} - -func toCustomResourceStatus(currentStatus *cephv1.CephFilesystemStatus, mirrorStatus []cephv1.FilesystemMirroringInfo, snapSchedStatus []cephv1.FilesystemSnapshotSchedulesSpec, details string) *cephv1.CephFilesystemStatus { - mirrorStatusSpec := &cephv1.FilesystemMirroringInfoSpec{} - mirrorSnapScheduleStatusSpec := &cephv1.FilesystemSnapshotScheduleStatusSpec{} - now := time.Now().UTC().Format(time.RFC3339) - - // MIRROR - if len(mirrorStatus) != 0 { - mirrorStatusSpec.LastChecked = now - mirrorStatusSpec.FilesystemMirroringAllInfo = mirrorStatus - } - - // Always display the details, typically an error - mirrorStatusSpec.Details = details - - if currentStatus != nil { - if currentStatus.MirroringStatus != nil { - mirrorStatusSpec.LastChanged = currentStatus.MirroringStatus.LastChanged - } - if currentStatus.SnapshotScheduleStatus != nil { - mirrorStatusSpec.LastChanged = currentStatus.SnapshotScheduleStatus.LastChanged - } - } - - // SNAP SCHEDULE - if len(snapSchedStatus) != 0 { - mirrorSnapScheduleStatusSpec.LastChecked = now - mirrorSnapScheduleStatusSpec.SnapshotSchedules = snapSchedStatus - } - // Always display the details, typically an error - mirrorSnapScheduleStatusSpec.Details = details - - return &cephv1.CephFilesystemStatus{MirroringStatus: mirrorStatusSpec, SnapshotScheduleStatus: mirrorSnapScheduleStatusSpec, Phase: currentStatus.Phase, Info: currentStatus.Info} -} diff --git a/pkg/operator/ceph/nfs/config.go b/pkg/operator/ceph/nfs/config.go deleted file mode 100644 index 67de962c6..000000000 --- a/pkg/operator/ceph/nfs/config.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package nfs
-
-import (
-	"fmt"
-
-	"github.com/pkg/errors"
-	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
-	cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
-	"github.com/rook/rook/pkg/operator/ceph/config/keyring"
-	cephver "github.com/rook/rook/pkg/operator/ceph/version"
-	"github.com/rook/rook/pkg/operator/k8sutil"
-)
-
-const (
-	keyringTemplate = `
-[%s]
-	key = %s
-	caps mon = "allow r"
-	caps osd = "%s"
-`
-)
-
-func getNFSUserID(nodeID string) string {
-	return fmt.Sprintf("nfs-ganesha.%s", nodeID)
-}
-
-func getNFSClientID(n *cephv1.CephNFS, name string) string {
-	return fmt.Sprintf("client.%s", getNFSUserID(getNFSNodeID(n, name)))
-}
-
-func getNFSNodeID(n *cephv1.CephNFS, name string) string {
-	return fmt.Sprintf("%s.%s", n.Name, name)
-}
-
-func getGaneshaConfigObject(n *cephv1.CephNFS, version cephver.CephVersion, name string) string {
-	/* Exports created with the Dashboard are not affected by a change in the config object name,
-	 * since the Dashboard looks up the ganesha config object only by the 'conf-' prefix. Exports
-	 * cannot be created with the volume/nfs plugin on Octopus because the ceph rook mgr module is
-	 * broken in that release.
-	 */
-	if version.IsAtLeastOctopus() {
-		return fmt.Sprintf("conf-nfs.%s", n.Name)
-	}
-	return fmt.Sprintf("conf-%s", getNFSNodeID(n, name))
-}
-
-func getRadosURL(n *cephv1.CephNFS, version cephver.CephVersion, name string) string {
-	url := fmt.Sprintf("rados://%s/", n.Spec.RADOS.Pool)
-
-	if n.Spec.RADOS.Namespace != "" {
-		url += n.Spec.RADOS.Namespace + "/"
-	}
-
-	url += getGaneshaConfigObject(n, version, name)
-	return url
-}
-
-func (r *ReconcileCephNFS) generateKeyring(n *cephv1.CephNFS, name string) error {
-	osdCaps := fmt.Sprintf("allow rw pool=%s", n.Spec.RADOS.Pool)
-	if n.Spec.RADOS.Namespace != "" {
-		osdCaps = fmt.Sprintf("%s namespace=%s", osdCaps, n.Spec.RADOS.Namespace)
-	}
-
-	caps := []string{"mon", "allow r", "osd", osdCaps}
-	user := getNFSClientID(n, name)
-
-	ownerInfo := k8sutil.NewOwnerInfo(n, r.scheme)
-	s := keyring.GetSecretStore(r.context, r.clusterInfo, ownerInfo)
-
-	key, err := s.GenerateKey(user, caps)
-	if err != nil {
-		return errors.Wrapf(err, "failed to create user %s", user)
-	}
-
-	keyring := fmt.Sprintf(keyringTemplate, user, key, osdCaps)
-	return s.CreateOrUpdate(instanceName(n, name), keyring)
-}
-
-func getGaneshaConfig(n *cephv1.CephNFS, version cephver.CephVersion, name string) string {
-	nodeID := getNFSNodeID(n, name)
-	userID := getNFSUserID(nodeID)
-	url := getRadosURL(n, version, name)
-	return `
-NFS_CORE_PARAM {
-	Enable_NLM = false;
-	Enable_RQUOTA = false;
-	Protocols = 4;
-}
-
-MDCACHE {
-	Dir_Chunk = 0;
-}
-
-EXPORT_DEFAULTS {
-	Attr_Expiration_Time = 0;
-}
-
-NFSv4 {
-	Delegations = false;
-	RecoveryBackend = 'rados_cluster';
-	Minor_Versions = 1, 2;
-}
-
-RADOS_KV {
-	ceph_conf = '` + cephclient.DefaultConfigFilePath() + `';
-	userid = ` + userID + `;
-	nodeid = ` + nodeID + `;
-	pool = "` + n.Spec.RADOS.Pool + `";
-	namespace = "` + n.Spec.RADOS.Namespace + `";
-}
-
-RADOS_URLS {
-	ceph_conf = '` + cephclient.DefaultConfigFilePath() + `';
-	userid = ` + userID + `;
-	watch_url = '` + url + `';
-}
-
-%url ` + url + `
-`
-}
diff --git a/pkg/operator/ceph/nfs/controller.go b/pkg/operator/ceph/nfs/controller.go
deleted file mode 100644
index 32f5cd913..000000000
--- a/pkg/operator/ceph/nfs/controller.go
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
-Copyright 2019 The Rook Authors. All rights reserved.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "context" - "fmt" - "reflect" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - controllerName = "ceph-nfs-controller" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -// List of object resources to watch by the controller -var objectsToWatch = []client.Object{ - &v1.Service{TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: v1.SchemeGroupVersion.String()}}, - &v1.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: v1.SchemeGroupVersion.String()}}, - &appsv1.Deployment{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.String()}}, -} - -var cephNFSKind = reflect.TypeOf(cephv1.CephNFS{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephNFSKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileCephNFS reconciles a cephNFS object -type ReconcileCephNFS struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - cephClusterSpec *cephv1.ClusterSpec - clusterInfo *cephclient.ClusterInfo -} - -// Add creates a new cephNFS Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - - return &ReconcileCephNFS{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the cephNFS CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephNFS{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - // Watch all other resources - for _, t := range objectsToWatch { - err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephNFS{}, - }, opcontroller.WatchPredicateForNonCRDObject(&cephv1.CephNFS{TypeMeta: controllerTypeMeta}, mgr.GetScheme())) - if err != nil { - return err - } - } - - return nil -} - -// Reconcile reads that state of the cluster for a cephNFS object and makes changes based on the state read -// and what is in the cephNFS.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileCephNFS) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - logger.Errorf("failed to reconcile %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileCephNFS) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the cephNFS instance - cephNFS := &cephv1.CephNFS{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephNFS) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("cephNFS resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. 
-		return reconcile.Result{}, errors.Wrap(err, "failed to get cephNFS")
-	}
-
-	// Set a finalizer so we can do cleanup before the object goes away
-	err = opcontroller.AddFinalizerIfNotPresent(r.client, cephNFS)
-	if err != nil {
-		return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer")
-	}
-
-	// The CR was just created, initializing status fields
-	if cephNFS.Status == nil {
-		updateStatus(r.client, request.NamespacedName, k8sutil.EmptyStatus)
-	}
-
-	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
-	if !isReadyToReconcile {
-		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
-		// We skip the deleteStore() function since everything is gone already
-		//
-		// Also, only remove the finalizer if the CephCluster is gone
-		// If not, we should wait for it to be ready
-		// This handles the case where the operator is not ready to accept Ceph commands but the cluster exists
-		if !cephNFS.GetDeletionTimestamp().IsZero() && !cephClusterExists {
-			// Remove finalizer
-			err := opcontroller.RemoveFinalizer(r.client, cephNFS)
-			if err != nil {
-				return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
-			}
-
-			// Return and do not requeue. Successful deletion.
-			return reconcile.Result{}, nil
-		}
-		return reconcileResponse, nil
-	}
-	r.cephClusterSpec = &cephCluster.Spec
-
-	// Populate clusterInfo
-	// Always populate it during each reconcile
-	r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace)
-	if err != nil {
-		return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info")
-	}
-
-	// Populate CephVersion
-	currentCephVersion, err := cephclient.LeastUptodateDaemonVersion(r.context, r.clusterInfo, opconfig.MonType)
-	if err != nil {
-		if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) {
-			logger.Info(opcontroller.OperatorNotInitializedMessage)
-			return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil
-		}
-		return reconcile.Result{}, errors.Wrapf(err, "failed to retrieve current ceph %q version", opconfig.MonType)
-	}
-	r.clusterInfo.CephVersion = currentCephVersion
-
-	// DELETE: the CR was deleted
-	if !cephNFS.GetDeletionTimestamp().IsZero() {
-		logger.Infof("deleting ceph nfs %q", cephNFS.Name)
-		err := r.removeServersFromDatabase(cephNFS, 0)
-		if err != nil {
-			return reconcile.Result{}, errors.Wrapf(err, "failed to delete ceph nfs %q", cephNFS.Name)
-		}
-
-		// Remove finalizer
-		err = opcontroller.RemoveFinalizer(r.client, cephNFS)
-		if err != nil {
-			return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
-		}
-
-		// Return and do not requeue. Successful deletion.
- return reconcile.Result{}, nil - } - - // validate the store settings - if err := validateGanesha(r.context, r.clusterInfo, cephNFS); err != nil { - return reconcile.Result{}, errors.Wrapf(err, "invalid ceph nfs %q arguments", cephNFS.Name) - } - - // CREATE/UPDATE - logger.Debug("reconciling ceph nfs deployments") - _, err = r.reconcileCreateCephNFS(cephNFS) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.FailedStatus) - return reconcile.Result{}, errors.Wrap(err, "failed to create ceph nfs deployments") - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, k8sutil.ReadyStatus) - - // Return and do not requeue - logger.Debug("done reconciling ceph nfs") - return reconcile.Result{}, nil - -} - -func (r *ReconcileCephNFS) reconcileCreateCephNFS(cephNFS *cephv1.CephNFS) (reconcile.Result, error) { - ctx := context.TODO() - if r.cephClusterSpec.External.Enable { - _, err := opcontroller.ValidateCephVersionsBetweenLocalAndExternalClusters(r.context, r.clusterInfo) - if err != nil { - // This handles the case where the operator is running, the external cluster has been upgraded and a CR creation is called - // If that's a major version upgrade we fail, if it's a minor version, we continue, it's not ideal but not critical - return reconcile.Result{}, errors.Wrap(err, "refusing to run new crd") - } - } - - deployments, err := r.context.Clientset.AppsV1().Deployments(cephNFS.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("app=%s", AppName)}) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Infof("creating ceph nfs %q", cephNFS.Name) - err := r.upCephNFS(cephNFS) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to create ceph nfs %q", cephNFS.Name) - } - return reconcile.Result{}, nil - } - return reconcile.Result{}, errors.Wrap(err, "failed to list ceph nfs deployments") - } - - nfsServerListNum := len(deployments.Items) - // Scale down case (CR value cephNFS.Spec.Server.Active changed) - if nfsServerListNum > cephNFS.Spec.Server.Active { - logger.Infof("scaling down ceph nfs %q from %d to %d", cephNFS.Name, nfsServerListNum, cephNFS.Spec.Server.Active) - err := r.downCephNFS(cephNFS, nfsServerListNum) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to scale down ceph nfs %q", cephNFS.Name) - } - } - // Update existing deployments and create new ones in the scale up case - logger.Infof("updating ceph nfs %q", cephNFS.Name) - err = r.upCephNFS(cephNFS) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to update ceph nfs %q", cephNFS.Name) - } - - return reconcile.Result{}, nil -} - -// updateStatus updates an object with a given status -func updateStatus(client client.Client, name types.NamespacedName, status string) { - nfs := &cephv1.CephNFS{} - err := client.Get(context.TODO(), name, nfs) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephNFS resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve nfs %q to update status to %q. %v", name, status, err) - return - } - if nfs.Status == nil { - nfs.Status = &cephv1.Status{} - } - - nfs.Status.Phase = status - if err := reporting.UpdateStatus(client, nfs); err != nil { - logger.Errorf("failed to set nfs %q status to %q. 
%v", nfs.Name, status, err) - } - logger.Debugf("nfs %q status updated to %q", name, status) -} diff --git a/pkg/operator/ceph/nfs/controller_test.go b/pkg/operator/ceph/nfs/controller_test.go deleted file mode 100644 index c013e26d5..000000000 --- a/pkg/operator/ceph/nfs/controller_test.go +++ /dev/null @@ -1,280 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package nfs to manage a rook ceph nfs -package nfs - -import ( - "context" - "errors" - "os" - "testing" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var ( - name = "my-nfs" - namespace = "rook-ceph" - nfsCephAuthGetOrCreateKey = `{"key":"AQCvzWBeIV9lFRAAninzm+8XFxbSfTiPwoX50g=="}` - dummyVersionsRaw = ` - { - "mon": { - "ceph version 14.2.8 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 3 - } - }` - poolDetails = `{ - "pool": "foo", - "pool_id": 1, - "size": 3, - "min_size": 2, - "pg_num": 8, - "pgp_num": 8, - "crush_rule": "replicated_rule", - "hashpspool": true, - "nodelete": false, - "nopgchange": false, - "nosizechange": false, - "write_fadvise_dontneed": false, - "noscrub": false, - "nodeep-scrub": false, - "use_gmt_hitset": true, - "fast_read": 0, - "pg_autoscale_mode": "on" - }` -) - -func TestCephNFSController(t *testing.T) { - ctx := context.TODO() - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - // A Pool resource with metadata and spec. - cephNFS := &cephv1.CephNFS{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.NFSGaneshaSpec{ - RADOS: cephv1.GaneshaRADOSSpec{ - Pool: "foo", - Namespace: namespace, - }, - Server: cephv1.GaneshaServerSpec{ - Active: 1, - }, - }, - TypeMeta: controllerTypeMeta, - } - - // Objects to track in the fake client. 
- object := []runtime.Object{ - cephNFS, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - return "", nil - }, - } - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephNFS{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephNFS object with the scheme and fake client. - r := &ReconcileCephNFS{client: cl, scheme: s, context: c} - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - logger.Info("STARTING PHASE 1") - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 1 DONE") - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephNFS object with the scheme and fake client. - r = &ReconcileCephNFS{client: cl, scheme: s, context: c} - logger.Info("STARTING PHASE 2") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 2 DONE") - - // - // TEST 3: - // - // SUCCESS! The CephCluster is ready - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. 
- cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return nfsCephAuthGetOrCreateKey, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - if args[0] == "osd" && args[1] == "pool" && args[2] == "get" { - return poolDetails, nil - } - return "", errors.New("unknown command") - }, - MockExecuteCommand: func(command string, args ...string) error { - if command == "rados" { - logger.Infof("mock execute. %s. %s", command, args) - assert.Equal(t, "stat", args[6]) - assert.Equal(t, "conf-my-nfs.a", args[7]) - return nil - } - return errors.New("unknown command") - }, - MockExecuteCommandWithEnv: func(env []string, command string, args ...string) error { - if command == "ganesha-rados-grace" { - logger.Infof("mock execute. %s. %s", command, args) - assert.Equal(t, "add", args[4]) - assert.Len(t, env, 1) - return nil - } - return errors.New("unknown command") - }, - } - c.Executor = executor - - // Create a ReconcileCephNFS object with the scheme and fake client. - r = &ReconcileCephNFS{client: cl, scheme: s, context: c} - - logger.Info("STARTING PHASE 3") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, cephNFS) - assert.NoError(t, err) - assert.Equal(t, "Ready", cephNFS.Status.Phase, cephNFS) - logger.Info("PHASE 3 DONE") -} - -func TestGetGaneshaConfigObject(t *testing.T) { - cephNFS := &cephv1.CephNFS{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - nodeid := "a" - expectedName := "conf-nfs.my-nfs" - - res := getGaneshaConfigObject(cephNFS, cephver.CephVersion{Major: 16}, nodeid) - logger.Infof("Config Object for Pacific is %s", res) - assert.Equal(t, expectedName, res) - - res = getGaneshaConfigObject(cephNFS, cephver.CephVersion{Major: 15, Minor: 2, Extra: 1}, nodeid) - logger.Infof("Config Object for Octopus is %s", res) - assert.Equal(t, expectedName, res) - - res = getGaneshaConfigObject(cephNFS, cephver.CephVersion{Major: 14, Minor: 2, Extra: 5}, nodeid) - logger.Infof("Config Object for Nautilus is %s", res) - assert.Equal(t, "conf-my-nfs.a", res) -} diff --git a/pkg/operator/ceph/nfs/nfs.go b/pkg/operator/ceph/nfs/nfs.go deleted file mode 100644 index 11bfc8f20..000000000 --- a/pkg/operator/ceph/nfs/nfs.go +++ /dev/null @@ -1,283 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-// Package nfs manages NFS ganesha servers for Ceph
-package nfs
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/banzaicloud/k8s-objectmatcher/patch"
-	"github.com/pkg/errors"
-	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
-	"github.com/rook/rook/pkg/clusterd"
-	cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
-	opmon "github.com/rook/rook/pkg/operator/ceph/cluster/mon"
-	"github.com/rook/rook/pkg/operator/ceph/config"
-	"github.com/rook/rook/pkg/operator/k8sutil"
-	v1 "k8s.io/api/core/v1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-)
-
-const (
-	ganeshaRadosGraceCmd = "ganesha-rados-grace"
-)
-
-var updateDeploymentAndWait = opmon.UpdateCephDeploymentAndWait
-
-type daemonConfig struct {
-	ID              string              // letter ID of daemon (e.g., a, b, c, ...)
-	ConfigConfigMap string              // name of configmap holding config
-	DataPathMap     *config.DataPathMap // location to store data in container
-}
-
-// Create the ganesha server
-func (r *ReconcileCephNFS) upCephNFS(n *cephv1.CephNFS) error {
-	ctx := context.TODO()
-	for i := 0; i < n.Spec.Server.Active; i++ {
-		id := k8sutil.IndexToName(i)
-
-		configName, err := r.createConfigMap(n, id)
-		if err != nil {
-			return errors.Wrap(err, "failed to create config")
-		}
-
-		err = r.addRADOSConfigFile(n, id)
-		if err != nil {
-			return errors.Wrap(err, "failed to create RADOS config object")
-		}
-
-		cfg := daemonConfig{
-			ID:              id,
-			ConfigConfigMap: configName,
-			DataPathMap: &config.DataPathMap{
-				HostDataDir:        "",                          // nfs daemon does not store data on host, ...
-				ContainerDataDir:   cephclient.DefaultConfigDir, // does share data in containers using emptyDir, ...
-				HostLogAndCrashDir: "",                          // and does not log to /var/log/ceph dir
-			},
-		}
-
-		err = r.generateKeyring(n, id)
-		if err != nil {
-			return errors.Wrapf(err, "failed to generate keyring for %q", id)
-		}
-
-		// create the deployment
-		deployment, err := r.makeDeployment(n, cfg)
-		if err != nil {
-			return errors.Wrap(err, "failed to create deployment")
-		}
-		// Set owner ref to cephNFS object
-		err = controllerutil.SetControllerReference(n, deployment, r.scheme)
-		if err != nil {
-			return errors.Wrapf(err, "failed to set owner reference for ceph nfs deployment %q", deployment.Name)
-		}
-
-		// Set the deployment hash as an annotation
-		err = patch.DefaultAnnotator.SetLastAppliedAnnotation(deployment)
-		if err != nil {
-			return errors.Wrapf(err, "failed to set annotation for deployment %q", deployment.Name)
-		}
-
-		// start the deployment
-		_, err = r.context.Clientset.AppsV1().Deployments(n.Namespace).Create(ctx, deployment, metav1.CreateOptions{})
-		if err != nil {
-			if !kerrors.IsAlreadyExists(err) {
-				return errors.Wrap(err, "failed to create ceph nfs deployment")
-			}
-			logger.Infof("ceph nfs deployment %q already exists.
updating if needed", deployment.Name) - if err := updateDeploymentAndWait(r.context, r.clusterInfo, deployment, "nfs", id, r.cephClusterSpec.SkipUpgradeChecks, false); err != nil { - return errors.Wrapf(err, "failed to update ceph nfs deployment %q", deployment.Name) - } - } else { - logger.Infof("ceph nfs deployment %q started", deployment.Name) - } - - // create a service - err = r.createCephNFSService(n, cfg) - if err != nil { - return errors.Wrap(err, "failed to create ceph nfs service") - } - - // Add server to database - err = r.addServerToDatabase(n, id) - if err != nil { - return errors.Wrapf(err, "failed to add server %q to database", id) - } - } - - return nil -} - -// Create empty config file for new ganesha server -func (r *ReconcileCephNFS) addRADOSConfigFile(n *cephv1.CephNFS, name string) error { - config := getGaneshaConfigObject(n, r.clusterInfo.CephVersion, name) - cmd := "rados" - args := []string{ - "--pool", n.Spec.RADOS.Pool, - "--namespace", n.Spec.RADOS.Namespace, - "--conf", cephclient.CephConfFilePath(r.context.ConfigDir, n.Namespace), - } - err := r.context.Executor.ExecuteCommand(cmd, append(args, "stat", config)...) - if err == nil { - // If stat works then we assume it's present already - return nil - } - - // try to create it - return r.context.Executor.ExecuteCommand(cmd, append(args, "create", config)...) -} - -func (r *ReconcileCephNFS) addServerToDatabase(nfs *cephv1.CephNFS, name string) error { - logger.Infof("adding ganesha %q to grace db", name) - - if err := r.runGaneshaRadosGrace(nfs, name, "add"); err != nil { - return errors.Wrapf(err, "failed to add %q to grace db", name) - } - - return nil -} - -func (r *ReconcileCephNFS) removeServerFromDatabase(nfs *cephv1.CephNFS, name string) { - logger.Infof("removing ganesha %q from grace db", name) - - if err := r.runGaneshaRadosGrace(nfs, name, "remove"); err != nil { - logger.Errorf("failed to remove %q from grace db. %v", name, err) - } -} - -func (r *ReconcileCephNFS) runGaneshaRadosGrace(nfs *cephv1.CephNFS, name, action string) error { - nodeID := getNFSNodeID(nfs, name) - cmd := ganeshaRadosGraceCmd - args := []string{"--pool", nfs.Spec.RADOS.Pool, "--ns", nfs.Spec.RADOS.Namespace, action, nodeID} - env := []string{fmt.Sprintf("CEPH_CONF=%s", cephclient.CephConfFilePath(r.context.ConfigDir, nfs.Namespace))} - - return r.context.Executor.ExecuteCommandWithEnv(env, cmd, args...) 
-} - -func (r *ReconcileCephNFS) generateConfigMap(n *cephv1.CephNFS, name string) *v1.ConfigMap { - - data := map[string]string{ - "config": getGaneshaConfig(n, r.clusterInfo.CephVersion, name), - } - configMap := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: instanceName(n, name), - Namespace: n.Namespace, - Labels: getLabels(n, name, true), - }, - Data: data, - } - - return configMap -} - -func (r *ReconcileCephNFS) createConfigMap(n *cephv1.CephNFS, name string) (string, error) { - ctx := context.TODO() - // Generate configMap - configMap := r.generateConfigMap(n, name) - - // Set owner reference - err := controllerutil.SetControllerReference(n, configMap, r.scheme) - if err != nil { - return "", errors.Wrapf(err, "failed to set owner reference for ceph ganesha configmap %q", configMap.Name) - } - - if _, err := r.context.Clientset.CoreV1().ConfigMaps(n.Namespace).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { - if !kerrors.IsAlreadyExists(err) { - return "", errors.Wrap(err, "failed to create ganesha config map") - } - - logger.Debugf("updating config map %q that already exists", configMap.Name) - if _, err = r.context.Clientset.CoreV1().ConfigMaps(n.Namespace).Update(ctx, configMap, metav1.UpdateOptions{}); err != nil { - return "", errors.Wrap(err, "failed to update ganesha config map") - } - } - - return configMap.Name, nil -} - -// Down scale the ganesha server -func (r *ReconcileCephNFS) downCephNFS(n *cephv1.CephNFS, nfsServerListNum int) error { - ctx := context.TODO() - diffCount := nfsServerListNum - n.Spec.Server.Active - for i := 0; i < diffCount; { - depIDToRemove := nfsServerListNum - 1 - - name := k8sutil.IndexToName(depIDToRemove) - depNameToRemove := instanceName(n, name) - - // Remove deployment - logger.Infof("removing deployment %q", depNameToRemove) - err := r.context.Clientset.AppsV1().Deployments(n.Namespace).Delete(ctx, depNameToRemove, metav1.DeleteOptions{}) - if err != nil { - if !kerrors.IsNotFound(err) { - return errors.Wrap(err, "failed to delete ceph nfs deployment") - } - } - - // Remove from grace db - r.removeServerFromDatabase(n, name) - - nfsServerListNum = nfsServerListNum - 1 - i++ - } - - return nil -} - -func (r *ReconcileCephNFS) removeServersFromDatabase(n *cephv1.CephNFS, newActive int) error { - for i := n.Spec.Server.Active - 1; i >= newActive; i-- { - name := k8sutil.IndexToName(i) - r.removeServerFromDatabase(n, name) - } - - return nil -} -func instanceName(n *cephv1.CephNFS, name string) string { - return fmt.Sprintf("%s-%s-%s", AppName, n.Name, name) -} - -func validateGanesha(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, n *cephv1.CephNFS) error { - // core properties - if n.Name == "" { - return errors.New("missing name") - } - if n.Namespace == "" { - return errors.New("missing namespace") - } - - // Client recovery properties - if n.Spec.RADOS.Pool == "" { - return errors.New("missing RADOS.pool") - } - - // Ganesha server properties - if n.Spec.Server.Active == 0 { - return errors.New("at least one active server required") - } - - // The existence of the pool provided in n.Spec.RADOS.Pool is necessary otherwise addRADOSConfigFile() will fail - _, err := cephclient.GetPoolDetails(context, clusterInfo, n.Spec.RADOS.Pool) - if err != nil { - return errors.Wrapf(err, "pool %q not found", n.Spec.RADOS.Pool) - } - - return nil -} diff --git a/pkg/operator/ceph/nfs/spec.go b/pkg/operator/ceph/nfs/spec.go deleted file mode 100644 index 088542a5b..000000000 --- a/pkg/operator/ceph/nfs/spec.go +++ 
/dev/null @@ -1,282 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "context" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -const ( - // AppName is the name of the app - AppName = "rook-ceph-nfs" - ganeshaConfigVolume = "ganesha-config" - nfsPort = 2049 - ganeshaPid = "/var/run/ganesha/ganesha.pid" -) - -func (r *ReconcileCephNFS) generateCephNFSService(nfs *cephv1.CephNFS, cfg daemonConfig) *v1.Service { - labels := getLabels(nfs, cfg.ID, true) - - svc := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: instanceName(nfs, cfg.ID), - Namespace: nfs.Namespace, - Labels: labels, - }, - Spec: v1.ServiceSpec{ - Selector: labels, - Ports: []v1.ServicePort{ - { - Name: "nfs", - Port: nfsPort, - TargetPort: intstr.FromInt(int(nfsPort)), - Protocol: v1.ProtocolTCP, - }, - }, - }, - } - - if r.cephClusterSpec.Network.IsHost() { - svc.Spec.ClusterIP = v1.ClusterIPNone - } - - return svc -} - -func (r *ReconcileCephNFS) createCephNFSService(nfs *cephv1.CephNFS, cfg daemonConfig) error { - ctx := context.TODO() - s := r.generateCephNFSService(nfs, cfg) - - // Set owner ref to the parent object - err := controllerutil.SetControllerReference(nfs, s, r.scheme) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to ceph nfs %q", s) - } - - svc, err := r.context.Clientset.CoreV1().Services(nfs.Namespace).Create(ctx, s, metav1.CreateOptions{}) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create ganesha service") - } - logger.Infof("ceph nfs service already created") - return nil - } - - logger.Infof("ceph nfs service running at %s:%d", svc.Spec.ClusterIP, nfsPort) - return nil -} - -func (r *ReconcileCephNFS) makeDeployment(nfs *cephv1.CephNFS, cfg daemonConfig) (*apps.Deployment, error) { - resourceName := instanceName(nfs, cfg.ID) - deployment := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: nfs.Namespace, - Labels: getLabels(nfs, cfg.ID, true), - }, - } - k8sutil.AddRookVersionLabelToDeployment(deployment) - controller.AddCephVersionLabelToDeployment(r.clusterInfo.CephVersion, deployment) - nfs.Spec.Server.Annotations.ApplyToObjectMeta(&deployment.ObjectMeta) - nfs.Spec.Server.Labels.ApplyToObjectMeta(&deployment.ObjectMeta) - - cephConfigVol, _ := cephConfigVolumeAndMount() - nfsConfigVol, _ := nfsConfigVolumeAndMount(cfg.ConfigConfigMap) - dbusVol, _ := dbusVolumeAndMount() - podSpec := v1.PodSpec{ - 
InitContainers: []v1.Container{ - r.connectionConfigInitContainer(nfs, cfg.ID), - }, - Containers: []v1.Container{ - r.daemonContainer(nfs, cfg), - r.dbusContainer(nfs), // dbus sidecar - }, - RestartPolicy: v1.RestartPolicyAlways, - Volumes: []v1.Volume{ - // do not mount usual daemon volumes, as no data is stored for this daemon, and the ceph - // config file is generated by the init container. we don't need to worry about missing - // override configs, because nfs-ganesha is not a Ceph daemon; it wouldn't observe any - // overrides anyway - cephConfigVol, - keyring.Volume().Resource(resourceName), - nfsConfigVol, - dbusVol, - }, - HostNetwork: r.cephClusterSpec.Network.IsHost(), - PriorityClassName: nfs.Spec.Server.PriorityClassName, - } - // Replace default unreachable node toleration - k8sutil.AddUnreachableNodeToleration(&podSpec) - - if r.cephClusterSpec.Network.IsHost() { - podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } - nfs.Spec.Server.Placement.ApplyToPodSpec(&podSpec) - - podTemplateSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Labels: getLabels(nfs, cfg.ID, true), - }, - Spec: podSpec, - } - - if r.cephClusterSpec.Network.IsHost() { - podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } else if r.cephClusterSpec.Network.IsMultus() { - if err := k8sutil.ApplyMultus(r.cephClusterSpec.Network, &podTemplateSpec.ObjectMeta); err != nil { - return nil, err - } - } - - nfs.Spec.Server.Annotations.ApplyToObjectMeta(&podTemplateSpec.ObjectMeta) - nfs.Spec.Server.Labels.ApplyToObjectMeta(&podTemplateSpec.ObjectMeta) - - // Multiple replicas of the nfs service would be handled by creating a service and a new deployment for each one, rather than increasing the pod count here - replicas := int32(1) - deployment.Spec = apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: getLabels(nfs, cfg.ID, false), - }, - Template: podTemplateSpec, - Replicas: &replicas, - } - - return deployment, nil -} - -func (r *ReconcileCephNFS) connectionConfigInitContainer(nfs *cephv1.CephNFS, name string) v1.Container { - _, cephConfigMount := cephConfigVolumeAndMount() - - return controller.GenerateMinimalCephConfInitContainer( - getNFSClientID(nfs, name), - keyring.VolumeMount().KeyringFilePath(), - r.cephClusterSpec.CephVersion.Image, - []v1.VolumeMount{ - cephConfigMount, - keyring.VolumeMount().Resource(instanceName(nfs, name)), - }, - nfs.Spec.Server.Resources, - controller.PodSecurityContext(), - ) -} - -func (r *ReconcileCephNFS) daemonContainer(nfs *cephv1.CephNFS, cfg daemonConfig) v1.Container { - _, cephConfigMount := cephConfigVolumeAndMount() - _, nfsConfigMount := nfsConfigVolumeAndMount(cfg.ConfigConfigMap) - _, dbusMount := dbusVolumeAndMount() - logLevel := "NIV_INFO" // Default log level - if nfs.Spec.Server.LogLevel != "" { - logLevel = nfs.Spec.Server.LogLevel - } - - return v1.Container{ - Name: "nfs-ganesha", - Command: []string{ - "ganesha.nfsd", - }, - Args: []string{ - "-F", // foreground - "-L", "STDERR", // log to stderr - "-p", ganeshaPid, // PID file location - "-N", logLevel, // Change Log level - }, - Image: r.cephClusterSpec.CephVersion.Image, - VolumeMounts: []v1.VolumeMount{ - cephConfigMount, - keyring.VolumeMount().Resource(instanceName(nfs, cfg.ID)), - nfsConfigMount, - dbusMount, - }, - Env: controller.DaemonEnvVars(r.cephClusterSpec.CephVersion.Image), - Resources: nfs.Spec.Server.Resources, - SecurityContext: controller.PodSecurityContext(), - } -} - -func (r *ReconcileCephNFS) dbusContainer(nfs 
*cephv1.CephNFS) v1.Container { - _, dbusMount := dbusVolumeAndMount() - - return v1.Container{ - Name: "dbus-daemon", - Command: []string{ - "dbus-daemon", - }, - Args: []string{ - "--nofork", // run in foreground - "--system", // use system config file (uses /run/dbus/system_bus_socket) - "--nopidfile", // don't write a pid file - // some dbus-daemon versions have flag --nosyslog to send logs to sterr; not ceph upstream image - }, - Image: r.cephClusterSpec.CephVersion.Image, - VolumeMounts: []v1.VolumeMount{ - dbusMount, - }, - Env: k8sutil.ClusterDaemonEnvVars(r.cephClusterSpec.CephVersion.Image), // do not need access to Ceph env vars b/c not a Ceph daemon - Resources: nfs.Spec.Server.Resources, - } -} - -func getLabels(n *cephv1.CephNFS, name string, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, n.Namespace, "nfs", name, includeNewLabels) - labels["ceph_nfs"] = n.Name - labels["instance"] = name - return labels -} - -func cephConfigVolumeAndMount() (v1.Volume, v1.VolumeMount) { - // nfs ganesha produces its own ceph config file, so cannot use controller.DaemonVolume or - // controller.DaemonVolumeMounts since that will bring in global ceph config file - cfgDir := cephclient.DefaultConfigDir - volName := k8sutil.PathToVolumeName(cfgDir) - v := v1.Volume{Name: volName, VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}} - m := v1.VolumeMount{Name: volName, MountPath: cfgDir} - return v, m -} - -func nfsConfigVolumeAndMount(configConfigMap string) (v1.Volume, v1.VolumeMount) { - cfgDir := "/etc/ganesha" // cfg file: /etc/ganesha/ganesha.conf - cfgVolName := ganeshaConfigVolume - configMapSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{Name: configConfigMap}, - Items: []v1.KeyToPath{{Key: "config", Path: "ganesha.conf"}}, - } - v := v1.Volume{Name: cfgVolName, VolumeSource: v1.VolumeSource{ConfigMap: configMapSource}} - m := v1.VolumeMount{Name: cfgVolName, MountPath: cfgDir} - return v, m -} - -func dbusVolumeAndMount() (v1.Volume, v1.VolumeMount) { - dbusSocketDir := "/run/dbus" // socket file: /run/dbus/system_bus_socket - volName := k8sutil.PathToVolumeName(dbusSocketDir) - v := v1.Volume{Name: volName, VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}} - m := v1.VolumeMount{Name: volName, MountPath: dbusSocketDir} - return v, m -} diff --git a/pkg/operator/ceph/nfs/spec_test.go b/pkg/operator/ceph/nfs/spec_test.go deleted file mode 100644 index cd171297e..000000000 --- a/pkg/operator/ceph/nfs/spec_test.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nfs - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/config" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - optest "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestDeploymentSpec(t *testing.T) { - nfs := &cephv1.CephNFS{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-nfs", - Namespace: "rook-ceph-test-ns", - }, - Spec: cephv1.NFSGaneshaSpec{ - RADOS: cephv1.GaneshaRADOSSpec{ - Pool: "myfs-data0", - Namespace: "nfs-test-ns", - }, - Server: cephv1.GaneshaServerSpec{ - Active: 3, - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(500.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(1024.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(200.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(512.0, resource.BinarySI), - }, - }, - PriorityClassName: "my-priority-class", - }, - }, - } - - clientset := optest.New(t, 1) - c := &clusterd.Context{ - Executor: &exectest.MockExecutor{}, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - s := scheme.Scheme - object := []runtime.Object{&cephv1.CephNFS{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.NFSGaneshaSpec{ - RADOS: cephv1.GaneshaRADOSSpec{ - Pool: "foo", - Namespace: namespace, - }, - Server: cephv1.GaneshaServerSpec{ - Active: 1, - }, - }, - TypeMeta: controllerTypeMeta, - }, - } - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - r := &ReconcileCephNFS{ - client: cl, - scheme: scheme.Scheme, - context: c, - clusterInfo: &cephclient.ClusterInfo{ - FSID: "myfsid", - CephVersion: cephver.Nautilus, - }, - cephClusterSpec: &cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{ - Image: "quay.io/ceph/ceph:v15", - }, - }, - } - - id := "i" - configName := "rook-ceph-nfs-my-nfs-i" - cfg := daemonConfig{ - ID: id, - ConfigConfigMap: configName, - DataPathMap: &config.DataPathMap{ - HostDataDir: "", // nfs daemon does not store data on host, ... - ContainerDataDir: cephclient.DefaultConfigDir, // does share data in containers using emptyDir, ... 
- HostLogAndCrashDir: "", // and does not log to /var/log/ceph dir nor creates crash dumps - }, - } - - d, err := r.makeDeployment(nfs, cfg) - assert.NoError(t, err) - - // Deployment should have Ceph labels - optest.AssertLabelsContainRookRequirements(t, d.ObjectMeta.Labels, AppName) - - podTemplate := optest.NewPodTemplateSpecTester(t, &d.Spec.Template) - podTemplate.RunFullSuite( - AppName, - optest.ResourceLimitExpectations{ - CPUResourceLimit: "500", - MemoryResourceLimit: "1Ki", - CPUResourceRequest: "200", - MemoryResourceRequest: "512", - }, - ) - assert.Equal(t, "my-priority-class", d.Spec.Template.Spec.PriorityClassName) -} diff --git a/pkg/operator/ceph/object/admin.go b/pkg/operator/ceph/object/admin.go deleted file mode 100644 index 7124265ba..000000000 --- a/pkg/operator/ceph/object/admin.go +++ /dev/null @@ -1,338 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - "fmt" - "net/http" - "net/http/httputil" - "regexp" - - "github.com/ceph/go-ceph/rgw/admin" - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/util/exec" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" -) - -// Context holds the context for the object store. -type Context struct { - Context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - CephClusterSpec cephv1.ClusterSpec - Name string - UID string - Endpoint string - Realm string - ZoneGroup string - Zone string -} - -// AdminOpsContext holds the object store context as well as information for connecting to the admin -// ops API. 
-type AdminOpsContext struct { - Context - TlsCert []byte - AdminOpsUserAccessKey string - AdminOpsUserSecretKey string - AdminOpsClient *admin.API -} - -type debugHTTPClient struct { - client admin.HTTPClient - logger *capnslog.PackageLogger -} - -// NewDebugHTTPClient helps us mutating the HTTP client to debug the request/response -func NewDebugHTTPClient(client admin.HTTPClient, logger *capnslog.PackageLogger) *debugHTTPClient { - return &debugHTTPClient{client, logger} -} - -func (c *debugHTTPClient) Do(req *http.Request) (*http.Response, error) { - dump, err := httputil.DumpRequestOut(req, true) - if err != nil { - return nil, err - } - c.logger.Debugf("\n%s\n", string(dump)) - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - dump, err = httputil.DumpResponse(resp, true) - if err != nil { - return nil, err - } - c.logger.Debugf("\n%s\n", string(dump)) - - return resp, nil -} - -const ( - // RGWAdminOpsUserSecretName is the secret name of the admin ops user - // #nosec G101 since this is not leaking any hardcoded credentials, it's just the secret name - RGWAdminOpsUserSecretName = "rgw-admin-ops-user" - rgwAdminOpsUserAccessKey = "accessKey" - rgwAdminOpsUserSecretKey = "secretKey" - rgwAdminOpsUserCaps = "buckets=*;users=*;usage=read;metadata=read;zone=read" -) - -var ( - rgwAdminOpsUserDisplayName = "RGW Admin Ops User" -) - -// NewContext creates a new object store context. -func NewContext(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, name string) *Context { - return &Context{Context: context, Name: name, clusterInfo: clusterInfo} -} - -func NewMultisiteContext(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, store *cephv1.CephObjectStore) (*Context, error) { - nsName := fmt.Sprintf("%s/%s", store.Namespace, store.Name) - - objContext := NewContext(context, clusterInfo, store.Name) - objContext.UID = string(store.UID) - - if err := UpdateEndpoint(objContext, &store.Spec); err != nil { - return nil, err - } - - realmName, zoneGroupName, zoneName, err := getMultisiteForObjectStore(context, &store.Spec, store.Namespace, store.Name) - if err != nil { - return nil, errors.Wrapf(err, "failed to get realm/zone group/zone for object store %q", nsName) - } - - objContext.Realm = realmName - objContext.ZoneGroup = zoneGroupName - objContext.Zone = zoneName - return objContext, nil -} - -// UpdateEndpoint updates an object.Context using the latest info from the CephObjectStore spec -func UpdateEndpoint(objContext *Context, spec *cephv1.ObjectStoreSpec) error { - nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name) - - port, err := spec.GetPort() - if err != nil { - return errors.Wrapf(err, "failed to get port for object store %q", nsName) - } - objContext.Endpoint = BuildDNSEndpoint(BuildDomainName(objContext.Name, objContext.clusterInfo.Namespace), port, spec.IsTLSEnabled()) - - return nil -} - -func NewMultisiteAdminOpsContext( - objContext *Context, - spec *cephv1.ObjectStoreSpec, -) (*AdminOpsContext, error) { - accessKey, secretKey, err := GetAdminOPSUserCredentials(objContext, spec) - if err != nil { - return nil, errors.Wrapf(err, "failed to create or retrieve rgw admin ops user") - } - - httpClient, tlsCert, err := GenObjectStoreHTTPClient(objContext, spec) - if err != nil { - return nil, err - } - - // If DEBUG level is set we will mutate the HTTP client for printing request and response - var client *admin.API - if logger.LevelAt(capnslog.DEBUG) { - client, err = 
admin.New(objContext.Endpoint, accessKey, secretKey, NewDebugHTTPClient(httpClient, logger)) - if err != nil { - return nil, errors.Wrap(err, "failed to build admin ops API connection") - } - } else { - client, err = admin.New(objContext.Endpoint, accessKey, secretKey, httpClient) - if err != nil { - return nil, errors.Wrap(err, "failed to build admin ops API connection") - } - } - - return &AdminOpsContext{ - Context: *objContext, - TlsCert: tlsCert, - AdminOpsUserAccessKey: accessKey, - AdminOpsUserSecretKey: secretKey, - AdminOpsClient: client, - }, nil -} - -func extractJSON(output string) (string, error) { - // `radosgw-admin` sometimes leaves logs to stderr even if it succeeds. - // So we should skip them if parsing output as json. - // valid JSON can be an object (in braces) or an array (in brackets) - arrayRegex := regexp.MustCompile(`(?ms)^\[.*\]$`) - arrayMatch := arrayRegex.Find([]byte(output)) - objRegex := regexp.MustCompile(`(?ms)^{.*}$`) - objMatch := objRegex.Find([]byte(output)) - if arrayMatch == nil && objMatch == nil { - return "", errors.Errorf("didn't contain json. %s", output) - } - if arrayMatch == nil && objMatch != nil { - return string(objMatch), nil - } - if arrayMatch != nil && objMatch == nil { - return string(arrayMatch), nil - } - // if both object and array match, take the largest of the two matches - if len(arrayMatch) > len(objMatch) { - return string(arrayMatch), nil - } - return string(objMatch), nil -} - -// RunAdminCommandNoMultisite is for running radosgw-admin commands in scenarios where an object-store has not been created yet or for commands on the realm or zonegroup (ex: radosgw-admin zonegroup get) -// This function times out after a fixed interval if no response is received. -// The function will return a Kubernetes error "NotFound" when exec fails when the pod does not exist -func RunAdminCommandNoMultisite(c *Context, expectJSON bool, args ...string) (string, error) { - var output, stderr string - var err error - - // If Multus is enabled we proxy all the command to the mgr sidecar - if c.CephClusterSpec.Network.IsMultus() { - output, stderr, err = c.Context.RemoteExecutor.ExecCommandInContainerWithFullOutputWithTimeout(cephclient.ProxyAppLabel, cephclient.CommandProxyInitContainerName, c.clusterInfo.Namespace, append([]string{"radosgw-admin"}, args...)...) - } else { - command, args := cephclient.FinalizeCephCommandArgs("radosgw-admin", c.clusterInfo, args, c.Context.ConfigDir) - output, err = c.Context.Executor.ExecuteCommandWithTimeout(exec.CephCommandsTimeout, command, args...) - } - - if err != nil { - return fmt.Sprintf("%s. %s", output, stderr), err - } - if expectJSON { - match, err := extractJSON(output) - if err != nil { - return output, errors.Wrap(err, "failed to parse as JSON") - } - output = match - } - - return output, nil -} - -// This function is for running radosgw-admin commands in scenarios where an object-store has been created and the Context has been updated with the appropriate realm, zone group, and zone. 
-func runAdminCommand(c *Context, expectJSON bool, args ...string) (string, error) { - // If the objectStoreName is not passed in the storage class - // This means we are pointing to an external cluster so these commands are not needed - // simply because the external cluster mode does not support that yet - // - // The following conditions tries to determine if the cluster is external - // When connecting to an external cluster, the Ceph user is different than client.admin - // This is not perfect though since "client.admin" is somehow supported... - if c.Name != "" && c.clusterInfo.CephCred.Username == cephclient.AdminUsername { - options := []string{ - fmt.Sprintf("--rgw-realm=%s", c.Realm), - fmt.Sprintf("--rgw-zonegroup=%s", c.ZoneGroup), - fmt.Sprintf("--rgw-zone=%s", c.Zone), - } - - args = append(args, options...) - } - - // work around FIFO file I/O issue when radosgw-admin is not compatible between version - // installed in Rook operator and RGW version in Ceph cluster (#7573) - result, err := RunAdminCommandNoMultisite(c, expectJSON, args...) - if err != nil && isFifoFileIOError(err) { - logger.Debugf("retrying 'radosgw-admin' command with OMAP backend to work around FIFO file I/O issue. %v", result) - - // We can either run 'ceph --version' to determine the Ceph version running in the operator - // and then pick a flag to use, or we can just try to use both flags and return the one that - // works. Same number of commands being run. - retryArgs := append(args, "--rgw-data-log-backing=omap") // v16.2.0- in the operator - retryResult, retryErr := RunAdminCommandNoMultisite(c, expectJSON, retryArgs...) - if retryErr != nil && isInvalidFlagError(retryErr) { - retryArgs = append(args, "--rgw-default-data-log-backing=omap") // v16.2.1+ in the operator - retryResult, retryErr = RunAdminCommandNoMultisite(c, expectJSON, retryArgs...) - } - - return retryResult, retryErr - } - - return result, err -} - -func isFifoFileIOError(err error) bool { - exitCode, extractErr := exec.ExtractExitCode(err) - if extractErr != nil { - logger.Errorf("failed to determine return code of 'radosgw-admin' command. assuming this could be a FIFO file I/O issue. %#v", extractErr) - return true - } - // exit code 5 (EIO) is returned when there is a FIFO file I/O issue - return exitCode == 5 -} - -func isInvalidFlagError(err error) bool { - exitCode, extractErr := exec.ExtractExitCode(err) - if extractErr != nil { - logger.Errorf("failed to determine return code of 'radosgw-admin' command. assuming this could be an invalid flag error. 
%#v", extractErr) - } - // exit code 22 (EINVAL) is returned when there is an invalid flag - // it's also returned from some other failures, but this should be rare for Rook - return exitCode == 22 -} - -func GetAdminOPSUserCredentials(objContext *Context, spec *cephv1.ObjectStoreSpec) (string, string, error) { - ns := objContext.clusterInfo.Namespace - - if spec.IsExternal() { - // Fetch the secret for admin ops user - s := &v1.Secret{} - err := objContext.Context.Client.Get(context.TODO(), types.NamespacedName{Name: RGWAdminOpsUserSecretName, Namespace: ns}, s) - if err != nil { - return "", "", err - } - - accessKey, ok := s.Data[rgwAdminOpsUserAccessKey] - if !ok { - return "", "", errors.Errorf("failed to find accessKey %q for rgw admin ops in secret %q", rgwAdminOpsUserAccessKey, RGWAdminOpsUserSecretName) - } - secretKey, ok := s.Data[rgwAdminOpsUserSecretKey] - if !ok { - return "", "", errors.Errorf("failed to find secretKey %q for rgw admin ops in secret %q", rgwAdminOpsUserSecretKey, RGWAdminOpsUserSecretName) - } - - // Set the keys for further usage - return string(accessKey), string(secretKey), nil - } - - // Fetch the admin ops user locally - userConfig := ObjectUser{ - UserID: RGWAdminOpsUserSecretName, - DisplayName: &rgwAdminOpsUserDisplayName, - AdminOpsUser: true, - } - logger.Debugf("creating s3 user object %q for object store %q", userConfig.UserID, ns) - user, rgwerr, err := CreateUser(objContext, userConfig) - if err != nil { - if rgwerr == ErrorCodeFileExists { - user, _, err = GetUser(objContext, userConfig.UserID) - if err != nil { - return "", "", errors.Wrapf(err, "failed to get details from ceph object user %q for object store %q", userConfig.UserID, objContext.Name) - } - } else { - return "", "", errors.Wrapf(err, "failed to create object user %q. error code %d for object store %q", userConfig.UserID, rgwerr, objContext.Name) - } - } - return *user.AccessKey, *user.SecretKey, nil -} diff --git a/pkg/operator/ceph/object/admin_mock.go b/pkg/operator/ceph/object/admin_mock.go deleted file mode 100644 index 87360e2ae..000000000 --- a/pkg/operator/ceph/object/admin_mock.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package object - -import "net/http" - -// MockClient is the mock of the HTTP Client -// It can be used to mock HTTP request/response from the rgw admin ops API -type MockClient struct { - // MockDo is a type that mock the Do method from the HTTP package - MockDo MockDoType -} - -// MockDoType is a custom type that allows setting the function that our Mock Do func will run instead -type MockDoType func(req *http.Request) (*http.Response, error) - -// Do is the mock client's `Do` func -func (m *MockClient) Do(req *http.Request) (*http.Response, error) { return m.MockDo(req) } diff --git a/pkg/operator/ceph/object/admin_test.go b/pkg/operator/ceph/object/admin_test.go deleted file mode 100644 index 5e05060b2..000000000 --- a/pkg/operator/ceph/object/admin_test.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "encoding/json" - "testing" - "time" - - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/test" - "github.com/rook/rook/pkg/util/exec" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" -) - -func TestExtractJson(t *testing.T) { - s := "invalid json" - _, err := extractJSON(s) - assert.Error(t, err) - - s = `{"test": "test"}` - match, err := extractJSON(s) - assert.NoError(t, err) - assert.True(t, json.Valid([]byte(match))) - - s = `this line can't be parsed as json -{"test": "test"}` - match, err = extractJSON(s) - assert.NoError(t, err) - assert.True(t, json.Valid([]byte(match))) - - s = `this line can't be parsed as json -{"test": -"test"}` - match, err = extractJSON(s) - assert.NoError(t, err) - assert.True(t, json.Valid([]byte(match))) - - s = `{"test": "test"} -this line can't be parsed as json` - match, err = extractJSON(s) - assert.NoError(t, err) - assert.True(t, json.Valid([]byte(match))) - - // complex example with array inside an object - s = `this line can't be parsed as json -{ - "array": - [ - "test", - "test" - ] -} -this line can't be parsed as json -` - match, err = extractJSON(s) - assert.NoError(t, err) - assert.True(t, json.Valid([]byte(match))) - assert.Equal(t, `{ - "array": - [ - "test", - "test" - ] -}`, match) - - s = `[{"test": "test"}]` - match, err = extractJSON(s) - assert.NoError(t, err) - assert.True(t, json.Valid([]byte(match))) - assert.Equal(t, `[{"test": "test"}]`, match) - - s = `this line can't be parsed as json -[{"test": "test"}]` - match, err = extractJSON(s) - assert.NoError(t, err) - assert.True(t, json.Valid([]byte(match))) - assert.Equal(t, `[{"test": "test"}]`, match) - - // complex example with array of objects - s = `this line can't be parsed as json -[ - { - "one": 1, - "two": 2 - }, - { - "three": 3, - "four": 4 - } -] -this line can't be parsed as json -` - match, err = extractJSON(s) - assert.NoError(t, err) - assert.True(t, json.Valid([]byte(match))) - 
assert.Equal(t, `[ - { - "one": 1, - "two": 2 - }, - { - "three": 3, - "four": 4 - } -]`, match) -} - -func TestRunAdminCommandNoMultisite(t *testing.T) { - objContext := &Context{ - Context: &clusterd.Context{RemoteExecutor: exec.RemotePodCommandExecutor{ClientSet: test.New(t, 3)}}, - clusterInfo: client.AdminClusterInfo("mycluster"), - } - - t.Run("no network provider - we run the radosgw-admin command from the operator", func(t *testing.T) { - executor := &exectest.MockExecutor{ - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "zone" { - return `{ - "id": "237e6250-5f7d-4b85-9359-8cb2b1848507", - "name": "realm-a", - "current_period": "df665ecb-1762-47a9-9c66-f938d251c02a", - "epoch": 2 - }`, nil - } - return "", nil - }, - } - - objContext.Context.Executor = executor - _, err := RunAdminCommandNoMultisite(objContext, true, []string{"zone", "get"}...) - assert.NoError(t, err) - }) - - t.Run("with multus - we use the remote executor", func(t *testing.T) { - objContext.CephClusterSpec = v1.ClusterSpec{Network: v1.NetworkSpec{Provider: "multus"}} - _, err := RunAdminCommandNoMultisite(objContext, true, []string{"zone", "get"}...) - assert.Error(t, err) - - // This is not the best but it shows we go through the right codepath - assert.EqualError(t, err, "no pods found with selector \"rook-ceph-mgr\"") - }) -} diff --git a/pkg/operator/ceph/object/bucket.go b/pkg/operator/ceph/object/bucket.go deleted file mode 100644 index da16b36ba..000000000 --- a/pkg/operator/ceph/object/bucket.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package object - -import ( - "encoding/json" - "strings" - "time" - - "github.com/pkg/errors" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - cephver "github.com/rook/rook/pkg/operator/ceph/version" -) - -const ( - beforeOctopusTime = "2006-01-02 15:04:05.999999999Z" - octopusAndAfterTime = "2006-01-02T15:04:05.999999999Z" -) - -type ObjectBucketMetadata struct { - Owner string `json:"owner"` - CreatedAt time.Time `json:"createdAt"` -} - -type ObjectBucketStats struct { - Size uint64 `json:"size"` - NumberOfObjects uint64 `json:"numberOfObjects"` -} - -type ObjectBucket struct { - Name string `json:"name"` - ObjectBucketMetadata - ObjectBucketStats -} - -type rgwBucketStats struct { - Bucket string `json:"bucket"` - Usage map[string]struct { - Size uint64 `json:"size"` - NumberOfObjects uint64 `json:"num_objects"` - } -} - -type ObjectBuckets []ObjectBucket - -func (slice ObjectBuckets) Len() int { - return len(slice) -} - -func (slice ObjectBuckets) Less(i, j int) bool { - return slice[i].Name < slice[j].Name -} - -func (slice ObjectBuckets) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} - -func bucketStatsFromRGW(stats rgwBucketStats) ObjectBucketStats { - s := ObjectBucketStats{Size: 0, NumberOfObjects: 0} - for _, usage := range stats.Usage { - s.Size = s.Size + usage.Size - s.NumberOfObjects = s.NumberOfObjects + usage.NumberOfObjects - } - return s -} - -func GetBucketStats(c *Context, bucketName string) (*ObjectBucketStats, bool, error) { - result, err := runAdminCommand(c, - true, - "bucket", - "stats", - "--bucket", bucketName) - - if err != nil { - if strings.Contains(err.Error(), "exit status 2") { - return nil, true, errors.New("not found") - } else { - return nil, false, errors.Wrap(err, "failed to get bucket stats") - } - } - - var rgwStats rgwBucketStats - if err := json.Unmarshal([]byte(result), &rgwStats); err != nil { - return nil, false, errors.Wrapf(err, "failed to read buckets stats result=%s", result) - } - - stat := bucketStatsFromRGW(rgwStats) - - return &stat, false, nil -} - -func GetBucketsStats(c *Context) (map[string]ObjectBucketStats, error) { - result, err := runAdminCommand(c, - true, - "bucket", - "stats") - if err != nil { - return nil, errors.Wrap(err, "failed to list buckets") - } - - var rgwStats []rgwBucketStats - if err := json.Unmarshal([]byte(result), &rgwStats); err != nil { - return nil, errors.Wrapf(err, "failed to read buckets stats result=%s", result) - } - - stats := map[string]ObjectBucketStats{} - - for _, rgwStat := range rgwStats { - stats[rgwStat.Bucket] = bucketStatsFromRGW(rgwStat) - } - - return stats, nil -} - -func getBucketMetadata(c *Context, bucket string) (*ObjectBucketMetadata, bool, error) { - result, err := runAdminCommand(c, - false, - "metadata", - "get", - "bucket:"+bucket) - if err != nil { - return nil, false, errors.Wrap(err, "failed to list buckets") - } - - if strings.Contains(result, "can't get key") { - return nil, true, errors.New("not found") - } - match, err := extractJSON(result) - if err != nil { - return nil, false, errors.Wrapf(err, "failed to read buckets list result=%s", result) - } - - var s struct { - Data struct { - Owner string `json:"owner"` - CreationTime string `json:"creation_time"` - } `json:"data"` - } - if err := json.Unmarshal([]byte(match), &s); err != nil { - return nil, false, errors.Wrapf(err, "failed to read buckets list result=%s", match) - } - - timeParser := octopusAndAfterTime - version, err := 
cephver.ExtractCephVersion(opcontroller.OperatorCephBaseImageVersion) - if err != nil { - logger.Errorf("failed to extract ceph version. %v", err) - } else { - vv := *version - if !vv.IsAtLeastOctopus() { - timeParser = beforeOctopusTime - } - } - - createdAt, err := time.Parse(timeParser, s.Data.CreationTime) - if err != nil { - return nil, false, errors.Wrapf(err, "Error parsing date (%s)", s.Data.CreationTime) - } - - return &ObjectBucketMetadata{Owner: s.Data.Owner, CreatedAt: createdAt}, false, nil -} - -func GetBucket(c *Context, bucket string) (*ObjectBucket, int, error) { - stat, notFound, err := GetBucketStats(c, bucket) - if notFound { - return nil, RGWErrorNotFound, errors.New("Bucket not found") - } - - if err != nil { - return nil, RGWErrorUnknown, errors.Wrap(err, "Failed to get bucket stats") - } - - metadata, notFound, err := getBucketMetadata(c, bucket) - if notFound { - return nil, RGWErrorNotFound, errors.New("Bucket not found") - } - - if err != nil { - return nil, RGWErrorUnknown, err - } - - return &ObjectBucket{Name: bucket, ObjectBucketMetadata: ObjectBucketMetadata{Owner: metadata.Owner, CreatedAt: metadata.CreatedAt}, ObjectBucketStats: *stat}, RGWErrorNone, nil -} diff --git a/pkg/operator/ceph/object/bucket/api-handlers.go b/pkg/operator/ceph/object/bucket/api-handlers.go deleted file mode 100644 index 7a428d45d..000000000 --- a/pkg/operator/ceph/object/bucket/api-handlers.go +++ /dev/null @@ -1,50 +0,0 @@ -package bucket - -import ( - "context" - "time" - - "github.com/pkg/errors" - storagev1 "k8s.io/api/storage/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" -) - -const ( - waitDuration = time.Second * 3 - waitFactor = 2 - waitJitter = 0.5 - waitSteps = 5 - waitCap = time.Minute * 5 -) - -var backoff = wait.Backoff{ - Duration: waitDuration, - Factor: waitFactor, - Jitter: waitJitter, - Steps: waitSteps, - Cap: waitCap, -} - -func (p *Provisioner) getStorageClassWithBackoff(name string) (class *storagev1.StorageClass, err error) { - ctx := context.TODO() - logger.Infof("getting storage class %q", name) - classClient := p.context.Clientset.StorageV1().StorageClasses() - // Retry Get() with backoff. Errors other than IsNotFound are ignored. - err = wait.ExponentialBackoff(backoff, func() (done bool, err error) { - class, err = classClient.Get(ctx, name, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if kerrors.IsNotFound(err) { - return true, err - } - logger.Errorf("error getting class %q, retrying. %v", name, err) - return false, nil - }) - if err != nil { - return nil, errors.Wrapf(err, "unable to Get storageclass %q", name) - } - return -} diff --git a/pkg/operator/ceph/object/bucket/provisioner.go b/pkg/operator/ceph/object/bucket/provisioner.go deleted file mode 100644 index 81eaeba4a..000000000 --- a/pkg/operator/ceph/object/bucket/provisioner.go +++ /dev/null @@ -1,733 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package bucket - -import ( - "context" - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/ceph/go-ceph/rgw/admin" - "github.com/coreos/pkg/capnslog" - bktv1alpha1 "github.com/kube-object-storage/lib-bucket-provisioner/pkg/apis/objectbucket.io/v1alpha1" - apibkt "github.com/kube-object-storage/lib-bucket-provisioner/pkg/provisioner/api" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/object" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/resource" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - cephutil "github.com/rook/rook/pkg/daemon/ceph/util" - cephObject "github.com/rook/rook/pkg/operator/ceph/object" -) - -type Provisioner struct { - context *clusterd.Context - objectContext *cephObject.Context - clusterInfo *client.ClusterInfo - bucketName string - storeDomainName string - storePort int32 - region string - // access keys for acct for the bucket *owner* - cephUserName string - accessKeyID string - secretAccessKey string - objectStoreName string - endpoint string - additionalConfigData map[string]string - tlsCert []byte - adminOpsClient *admin.API -} - -var _ apibkt.Provisioner = &Provisioner{} - -func NewProvisioner(context *clusterd.Context, clusterInfo *client.ClusterInfo) *Provisioner { - return &Provisioner{context: context, clusterInfo: clusterInfo} -} - -// Provision creates an s3 bucket and returns a connection info -// representing the bucket's endpoint and user access credentials. -func (p Provisioner) Provision(options *apibkt.BucketOptions) (*bktv1alpha1.ObjectBucket, error) { - logger.Debugf("Provision event for OB options: %+v", options) - - err := p.initializeCreateOrGrant(options) - if err != nil { - return nil, err - } - logger.Infof("Provision: creating bucket %q for OBC %q", p.bucketName, options.ObjectBucketClaim.Name) - - // dynamically create a new ceph user - p.accessKeyID, p.secretAccessKey, err = p.createCephUser("") - if err != nil { - return nil, errors.Wrap(err, "Provision: can't create ceph user") - } - - s3svc, err := cephObject.NewS3Agent(p.accessKeyID, p.secretAccessKey, p.getObjectStoreEndpoint(), logger.LevelAt(capnslog.DEBUG), p.tlsCert) - if err != nil { - p.deleteOBCResourceLogError("") - return nil, err - } - - // create the bucket - err = s3svc.CreateBucket(p.bucketName) - if err != nil { - err = errors.Wrapf(err, "error creating bucket %q", p.bucketName) - logger.Errorf(err.Error()) - p.deleteOBCResourceLogError("") - return nil, err - } - - singleBucketQuota := 1 - _, err = p.adminOpsClient.ModifyUser(context.TODO(), admin.User{ID: p.cephUserName, MaxBuckets: &singleBucketQuota}) - if err != nil { - p.deleteOBCResourceLogError(p.bucketName) - return nil, err - } - logger.Infof("set user %q bucket max to %d", p.cephUserName, singleBucketQuota) - - // setting quota limit if it is enabled - err = p.setAdditionalSettings(options) - if err != nil { - p.deleteOBCResourceLogError(p.bucketName) - return nil, err - } - - return p.composeObjectBucket(), nil -} - -// Grant attaches to an existing rgw bucket and returns a connection info -// representing the bucket's endpoint and user access credentials. 
-func (p Provisioner) Grant(options *apibkt.BucketOptions) (*bktv1alpha1.ObjectBucket, error) { - logger.Debugf("Grant event for OB options: %+v", options) - - // initialize and set the AWS services and commonly used variables - err := p.initializeCreateOrGrant(options) - if err != nil { - return nil, err - } - logger.Infof("Grant: allowing access to bucket %q for OBC %q", p.bucketName, options.ObjectBucketClaim.Name) - - // check and make sure the bucket exists - logger.Infof("Checking for existing bucket %q", p.bucketName) - if exists, err := p.bucketExists(p.bucketName); !exists { - return nil, errors.Wrapf(err, "bucket %s does not exist", p.bucketName) - } - - p.accessKeyID, p.secretAccessKey, err = p.createCephUser("") - if err != nil { - return nil, err - } - - // need to quota into -1 for restricting creation of new buckets in rgw - restrictBucketCreation := -1 - _, err = p.adminOpsClient.ModifyUser(context.TODO(), admin.User{ID: p.cephUserName, MaxBuckets: &restrictBucketCreation}) - if err != nil { - p.deleteOBCResourceLogError("") - return nil, err - } - - // get the bucket's owner via the bucket metadata - stats, err := p.adminOpsClient.GetBucketInfo(context.TODO(), admin.Bucket{Bucket: p.bucketName}) - if err != nil { - p.deleteOBCResourceLogError("") - return nil, errors.Wrapf(err, "failed to get bucket %q stats", p.bucketName) - } - - objectUser, err := p.adminOpsClient.GetUser(context.TODO(), admin.User{ID: stats.Owner}) - if err != nil { - p.deleteOBCResourceLogError("") - return nil, errors.Wrapf(err, "failed to get user %q", stats.Owner) - } - - s3svc, err := cephObject.NewS3Agent(objectUser.Keys[0].AccessKey, objectUser.Keys[0].SecretKey, p.getObjectStoreEndpoint(), logger.LevelAt(capnslog.DEBUG), p.tlsCert) - if err != nil { - p.deleteOBCResourceLogError("") - return nil, err - } - - // if the policy does not exist, we'll create a new and append the statement to it - policy, err := s3svc.GetBucketPolicy(p.bucketName) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - if aerr.Code() != "NoSuchBucketPolicy" { - p.deleteOBCResourceLogError("") - return nil, err - } - } - } - - statement := cephObject.NewPolicyStatement(). - WithSID(p.cephUserName). - ForPrincipals(p.cephUserName). - ForResources(p.bucketName). - ForSubResources(p.bucketName). - Allows(). - Actions(cephObject.AllowedActions...) - if policy == nil { - policy = cephObject.NewBucketPolicy(*statement) - } else { - policy = policy.ModifyBucketPolicy(*statement) - } - out, err := s3svc.PutBucketPolicy(p.bucketName, *policy) - - logger.Infof("PutBucketPolicy output: %v", out) - if err != nil { - p.deleteOBCResourceLogError("") - return nil, err - } - - // setting quota limit if it is enabled - err = p.setAdditionalSettings(options) - if err != nil { - p.deleteOBCResourceLogError("") - return nil, err - } - - // returned ob with connection info - return p.composeObjectBucket(), nil -} - -// Delete is called when the ObjectBucketClaim (OBC) is deleted and the associated -// storage class' reclaimPolicy is "Delete". Or, if a Provision() error occurs and -// the bucket controller needs to clean up before retrying. 
-func (p Provisioner) Delete(ob *bktv1alpha1.ObjectBucket) error { - logger.Debugf("Delete event for OB: %+v", ob) - - err := p.initializeDeleteOrRevoke(ob) - if err != nil { - return err - } - logger.Infof("Delete: deleting bucket %q for OB %q", p.bucketName, ob.Name) - - if err := p.deleteOBCResource(p.bucketName); err != nil { - return errors.Wrapf(err, "failed to delete OBCResource bucket %q", p.bucketName) - } - return nil -} - -// Revoke removes a user and creds from an existing bucket. -// Note: cleanup order below matters. -func (p Provisioner) Revoke(ob *bktv1alpha1.ObjectBucket) error { - logger.Debugf("Revoke event for OB: %+v", ob) - - err := p.initializeDeleteOrRevoke(ob) - if err != nil { - return err - } - logger.Infof("Revoke: denying access to bucket %q for OB %q", p.bucketName, ob.Name) - - bucket, err := p.adminOpsClient.GetBucketInfo(context.TODO(), admin.Bucket{Bucket: p.bucketName}) - if err != nil { - logger.Errorf("%v", err) - } else { - if bucket.Owner == "" { - return errors.Errorf("failed to find bucket %q owner", p.bucketName) - } - - user, err := p.adminOpsClient.GetUser(context.TODO(), admin.User{ID: bucket.Owner}) - if err != nil { - if errors.Is(err, admin.ErrNoSuchUser) { - // The user may not exist. Ignore this in order to ensure the PolicyStatement does not contain the - // stale user. - return nil - } - - return err - } - - s3svc, err := cephObject.NewS3Agent(user.Keys[0].AccessKey, user.Keys[0].SecretKey, p.getObjectStoreEndpoint(), logger.LevelAt(capnslog.DEBUG), p.tlsCert) - if err != nil { - return err - } - - // Ignore cases where there is no bucket policy. This may have occurred if an error ended a Grant() - // call before the policy was attached to the bucket - policy, err := s3svc.GetBucketPolicy(p.bucketName) - if err != nil { - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchBucketPolicy" { - policy = nil - logger.Errorf("no bucket policy for bucket %q, so no need to drop policy", p.bucketName) - - } else { - logger.Errorf("error getting policy for bucket %q. %v", p.bucketName, err) - return err - } - } - - if bucket.Owner == p.cephUserName { - statement := cephObject.NewPolicyStatement(). - WithSID(p.cephUserName). - ForPrincipals(p.cephUserName). - ForResources(p.bucketName). - ForSubResources(p.bucketName). - Denies(). - Actions(cephObject.AllowedActions...) - if policy == nil { - policy = cephObject.NewBucketPolicy(*statement) - } else { - policy = policy.ModifyBucketPolicy(*statement) - } - out, err := s3svc.PutBucketPolicy(p.bucketName, *policy) - logger.Infof("PutBucketPolicy output: %v", out) - if err != nil { - return errors.Wrap(err, "failed to update policy") - } else { - return nil - } - } - - // drop policy if present - if policy != nil { - policy = policy.DropPolicyStatements(p.cephUserName) - _, err := s3svc.PutBucketPolicy(p.bucketName, *policy) - if err != nil { - return err - } - logger.Infof("principal %q ejected from bucket %q policy", p.cephUserName, p.bucketName) - } - } - - // finally, delete the user - p.deleteOBCResourceLogError("") - return nil -} - -// Return the OB struct with minimal fields filled in. -// initializeCreateOrGrant sets common provisioner receiver fields and -// the services and sessions needed to provision. 
-func (p *Provisioner) initializeCreateOrGrant(options *apibkt.BucketOptions) error { - logger.Info("initializing and setting CreateOrGrant services") - - // set the bucket name - obc := options.ObjectBucketClaim - scName := options.ObjectBucketClaim.Spec.StorageClassName - sc, err := p.getStorageClassWithBackoff(scName) - if err != nil { - logger.Errorf("failed to get storage class for OBC %q in namespace %q. %v", obc.Name, obc.Namespace, err) - return err - } - - // In most cases we assume the bucket is to be generated dynamically. When a storage class - // defines the bucket in the parameters, it's assumed to be a request to connect to a statically - // created bucket. In these cases, we forego generating a bucket. Instead we connect a newly generated - // user to the existing bucket. - p.setBucketName(options.BucketName) - if bucketName, isStatic := isStaticBucket(sc); isStatic { - p.setBucketName(bucketName) - } - - p.setObjectStoreName(sc) - p.setRegion(sc) - p.setAdditionalConfigData(obc.Spec.AdditionalConfig) - p.setEndpoint(sc) - err = p.setObjectContext() - if err != nil { - return err - } - - // If an endpoint is declared let's use it - err = p.populateDomainAndPort(sc) - if err != nil { - return errors.Wrap(err, "failed to set domain and port") - } - err = p.setTlsCaCert() - if err != nil { - return errors.Wrapf(err, "failed to set CA cert for the OBC %q to connect with object store %q via TLS", obc.Name, p.objectStoreName) - } - - // Set admin ops api client - err = p.setAdminOpsAPIClient() - if err != nil { - // Replace the error with a nicer more comprehensive one - // If the ceph config is not initialized yet, the radosgw-admin command will fail to retrieve the user - if strings.Contains(err.Error(), opcontroller.OperatorNotInitializedMessage) { - return errors.New(opcontroller.OperatorNotInitializedMessage) - } - return errors.Wrap(err, "failed to set admin ops api client") - } - - return nil -} - -func (p *Provisioner) initializeDeleteOrRevoke(ob *bktv1alpha1.ObjectBucket) error { - - sc, err := p.getStorageClassWithBackoff(ob.Spec.StorageClassName) - if err != nil { - return errors.Wrapf(err, "failed to get storage class for OB %q", ob.Name) - } - - // set receiver fields from OB data - p.setBucketName(getBucketName(ob)) - p.cephUserName = getCephUser(ob) - p.objectStoreName = getObjectStoreName(sc) - p.setEndpoint(sc) - err = p.setObjectContext() - if err != nil { - return err - } - - err = p.populateDomainAndPort(sc) - if err != nil { - return err - } - - err = p.setTlsCaCert() - if err != nil { - return errors.Wrapf(err, "failed to set CA cert for the OB %q to connect with object store %q via TLS", ob.Name, p.objectStoreName) - } - - // Set admin ops api client - err = p.setAdminOpsAPIClient() - if err != nil { - // Replace the error with a nicer more comprehensive one - // If the ceph config is not initialized yet, the radosgw-admin command will fail to retrieve the user - if strings.Contains(err.Error(), opcontroller.OperatorNotInitializedMessage) { - return errors.New(opcontroller.OperatorNotInitializedMessage) - } - return errors.Wrap(err, "failed to set admin ops api client") - } - - return nil -} - -// Return the OB struct with minimal fields filled in. 
-func (p *Provisioner) composeObjectBucket() *bktv1alpha1.ObjectBucket { - - conn := &bktv1alpha1.Connection{ - Endpoint: &bktv1alpha1.Endpoint{ - BucketHost: p.storeDomainName, - BucketPort: int(p.storePort), - BucketName: p.bucketName, - Region: p.region, - AdditionalConfigData: p.additionalConfigData, - }, - Authentication: &bktv1alpha1.Authentication{ - AccessKeys: &bktv1alpha1.AccessKeys{ - AccessKeyID: p.accessKeyID, - SecretAccessKey: p.secretAccessKey, - }, - }, - AdditionalState: map[string]string{ - cephUser: p.cephUserName, - }, - } - - return &bktv1alpha1.ObjectBucket{ - Spec: bktv1alpha1.ObjectBucketSpec{ - Connection: conn, - }, - } -} - -func (p *Provisioner) setObjectContext() error { - msg := "error building object.Context: store %s cannot be empty" - // p.endpoint means we point to an external cluster - if p.objectStoreName == "" && p.endpoint == "" { - return errors.Errorf(msg, "name") - } - - // We don't need the CephObjectStore if an endpoint is provided - // In 1.3, OBC external is working with an Endpoint (from the SC param) and in 1.4 we have a CephObjectStore but we must keep backward compatibility - // In 1.4, the Endpoint from the SC is not expected and never used so we will enter the "else" condition which gets a CephObjectStore and it is present - if p.endpoint != "" { - p.objectContext = cephObject.NewContext(p.context, p.clusterInfo, p.objectStoreName) - } else { - // Get CephObjectStore - store, err := p.getObjectStore() - if err != nil { - return errors.Wrap(err, "failed to get cephObjectStore") - } - - // Set multisite context - p.objectContext, err = cephObject.NewMultisiteContext(p.context, p.clusterInfo, store) - if err != nil { - return errors.Wrap(err, "failed to set multisite on provisioner's objectContext") - } - } - - return nil -} - -// setObjectStoreDomainName sets the provisioner.storeDomainName and provisioner.port -// must be called after setObjectStoreName and setObjectStoreNamespace -func (p *Provisioner) setObjectStoreDomainName(sc *storagev1.StorageClass) error { - - name := getObjectStoreName(sc) - namespace := getObjectStoreNameSpace(sc) - // make sure the object store actually exists - _, err := p.getObjectStore() - if err != nil { - return err - } - p.storeDomainName = cephObject.BuildDomainName(name, namespace) - return nil -} - -func (p *Provisioner) setObjectStorePort() error { - store, err := p.getObjectStore() - if err != nil { - return errors.Wrap(err, "failed to get cephObjectStore") - } - p.storePort, err = store.Spec.GetPort() - return err -} - -func (p *Provisioner) setObjectStoreName(sc *storagev1.StorageClass) { - p.objectStoreName = sc.Parameters[objectStoreName] -} - -func (p *Provisioner) setBucketName(name string) { - p.bucketName = name -} - -func (p *Provisioner) setAdditionalConfigData(additionalConfigData map[string]string) { - if len(additionalConfigData) == 0 { - additionalConfigData = make(map[string]string) - } - p.additionalConfigData = additionalConfigData -} - -func (p *Provisioner) setEndpoint(sc *storagev1.StorageClass) { - p.endpoint = sc.Parameters[objectStoreEndpoint] -} - -func (p *Provisioner) setRegion(sc *storagev1.StorageClass) { - const key = "region" - p.region = sc.Parameters[key] -} - -func (p Provisioner) getObjectStoreEndpoint() string { - return fmt.Sprintf("%s:%d", p.storeDomainName, p.storePort) -} - -func (p *Provisioner) populateDomainAndPort(sc *storagev1.StorageClass) error { - endpoint := getObjectStoreEndpoint(sc) - // if endpoint is present, let's introspect it - if endpoint != "" { 
-		p.storeDomainName = cephutil.GetIPFromEndpoint(endpoint)
-		if p.storeDomainName == "" {
-			return errors.New("failed to discover endpoint IP (is empty)")
-		}
-		p.storePort = cephutil.GetPortFromEndpoint(endpoint)
-		if p.storePort == 0 {
-			return errors.New("failed to discover endpoint port (is empty)")
-		}
-		// If no endpoint exists let's see if CephObjectStore exists
-	} else {
-		if err := p.setObjectStoreDomainName(sc); err != nil {
-			return errors.Wrap(err, "failed to set object store domain name")
-		}
-		if err := p.setObjectStorePort(); err != nil {
-			return errors.Wrap(err, "failed to set object store port")
-		}
-	}
-
-	return nil
-}
-
-func (p *Provisioner) deleteOBCResourceLogError(bucketname string) {
-	if err := p.deleteOBCResource(bucketname); err != nil {
-		logger.Warningf("failed to delete OBC resource. %v", err)
-	}
-}
-
-// Check for additional options mentioned in OBC and set them accordingly
-func (p Provisioner) setAdditionalSettings(options *apibkt.BucketOptions) error {
-	quotaEnabled := true
-	maxObjects := MaxObjectQuota(options.ObjectBucketClaim.Spec.AdditionalConfig)
-	maxSize := MaxSizeQuota(options.ObjectBucketClaim.Spec.AdditionalConfig)
-	if maxObjects == "" && maxSize == "" {
-		return nil
-	}
-
-	// Enabling quota for the user
-	err := p.adminOpsClient.SetUserQuota(context.TODO(), admin.QuotaSpec{UID: p.cephUserName, Enabled: &quotaEnabled})
-	if err != nil {
-		return errors.Wrapf(err, "failed to enable user %q quota for obc", p.cephUserName)
-	}
-
-	if maxObjects != "" {
-		maxObjectsInt, err := strconv.Atoi(maxObjects)
-		if err != nil {
-			return errors.Wrap(err, "failed to convert maxObjects to integer")
-		}
-		maxObjectsInt64 := int64(maxObjectsInt)
-		err = p.adminOpsClient.SetUserQuota(context.TODO(), admin.QuotaSpec{UID: p.cephUserName, MaxObjects: &maxObjectsInt64})
-		if err != nil {
-			return errors.Wrapf(err, "failed to set MaxObject to user %q", p.cephUserName)
-		}
-	}
-	if maxSize != "" {
-		maxSizeInt, err := maxSizeToInt64(maxSize)
-		if err != nil {
-			return errors.Wrapf(err, "failed to parse maxSize quota for user %q", p.cephUserName)
-		}
-		err = p.adminOpsClient.SetUserQuota(context.TODO(), admin.QuotaSpec{UID: p.cephUserName, MaxSize: &maxSizeInt})
-		if err != nil {
-			return errors.Wrapf(err, "failed to set MaxSize to user %q", p.cephUserName)
-		}
-	}
-
-	return nil
-}
-
-func maxSizeToInt64(maxSize string) (int64, error) {
-	maxSizeInt, err := resource.ParseQuantity(maxSize)
-	if err != nil {
-		return 0, errors.Wrap(err, "failed to parse quantity")
-	}
-
-	return maxSizeInt.Value(), nil
-}
-
-func (p *Provisioner) setTlsCaCert() error {
-	objStore, err := p.getObjectStore()
-	if err != nil {
-		return err
-	}
-	p.tlsCert = make([]byte, 0)
-	if objStore.Spec.Gateway.SecurePort == p.storePort {
-		p.tlsCert, err = cephObject.GetTlsCaCert(p.objectContext, &objStore.Spec)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (p *Provisioner) setAdminOpsAPIClient() error {
-	// Build TLS transport for the HTTP client if needed
-	httpClient := &http.Client{
-		Timeout: cephObject.HttpTimeOut,
-	}
-	if p.tlsCert != nil {
-		httpClient.Transport = cephObject.BuildTransportTLS(p.tlsCert)
-	}
-
-	// Fetch the ceph object store
-	cephObjectStore, err := p.getObjectStore()
-	if err != nil {
-		return errors.Wrapf(err, "failed to get ceph object store %q", p.objectStoreName)
-	}
-
-	cephCluster, err := p.getCephCluster()
-	if err != nil {
-		return errors.Wrapf(err, "failed to get ceph cluster in namespace %q", p.clusterInfo.Namespace)
-	}
-	if cephCluster == nil {
-		return errors.Errorf("failed to read ceph cluster in namespace %q, it's nil", p.clusterInfo.Namespace)
-	}
-	// Set the Ceph Cluster Spec so that we can fetch the admin ops key properly when multus is enabled
-	p.objectContext.CephClusterSpec = cephCluster.Spec
-
-	// Fetch the object store admin ops user
-	accessKey, secretKey, err := cephObject.GetAdminOPSUserCredentials(p.objectContext, &cephObjectStore.Spec)
-	if err != nil {
-		return errors.Wrap(err, "failed to retrieve rgw admin ops user")
-	}
-
-	// Build endpoint
-	s3endpoint := cephObject.BuildDNSEndpoint(cephObject.BuildDomainName(p.objectContext.Name, cephObjectStore.Namespace), p.storePort, cephObjectStore.Spec.IsTLSEnabled())
-
-	// If DEBUG level is set we will mutate the HTTP client for printing request and response
-	if logger.LevelAt(capnslog.DEBUG) {
-		p.adminOpsClient, err = admin.New(s3endpoint, accessKey, secretKey, object.NewDebugHTTPClient(httpClient, logger))
-		if err != nil {
-			return errors.Wrap(err, "failed to build admin ops API connection")
-		}
-	} else {
-		p.adminOpsClient, err = admin.New(s3endpoint, accessKey, secretKey, httpClient)
-		if err != nil {
-			return errors.Wrap(err, "failed to build admin ops API connection")
-		}
-	}
-
-	return nil
-}
-func (p Provisioner) updateAdditionalSettings(ob *bktv1alpha1.ObjectBucket) error {
-	var maxObjectsInt64 int64
-	var maxSizeInt64 int64
-	var err error
-	var quotaEnabled bool
-	maxObjects := MaxObjectQuota(ob.Spec.Endpoint.AdditionalConfigData)
-	maxSize := MaxSizeQuota(ob.Spec.Endpoint.AdditionalConfigData)
-	if maxObjects != "" {
-		maxObjectsInt, err := strconv.Atoi(maxObjects)
-		if err != nil {
-			return errors.Wrap(err, "failed to convert maxObjects to integer")
-		}
-		maxObjectsInt64 = int64(maxObjectsInt)
-	}
-	if maxSize != "" {
-		maxSizeInt64, err = maxSizeToInt64(maxSize)
-		if err != nil {
-			return errors.Wrapf(err, "failed to parse maxSize quota for user %q", p.cephUserName)
-		}
-	}
-	objectUser, err := p.adminOpsClient.GetUser(context.TODO(), admin.User{ID: ob.Spec.Connection.AdditionalState[cephUser]})
-	if err != nil {
-		return errors.Wrapf(err, "failed to fetch user %q", p.cephUserName)
-	}
-	if *objectUser.UserQuota.Enabled &&
-		(maxObjects == "" || maxObjectsInt64 < 0) &&
-		(maxSize == "" || maxSizeInt64 < 0) {
-		quotaEnabled = false
-		err = p.adminOpsClient.SetUserQuota(context.TODO(), admin.QuotaSpec{UID: p.cephUserName, Enabled: &quotaEnabled})
-		if err != nil {
-			return errors.Wrapf(err, "failed to disable quota to user %q", p.cephUserName)
-		}
-		return nil
-	}
-
-	quotaEnabled = true
-	quotaSpec := admin.QuotaSpec{UID: p.cephUserName, Enabled: &quotaEnabled}
-
-	//MaxObject is modified
-	if maxObjects != "" && (maxObjectsInt64 != *objectUser.UserQuota.MaxObjects) {
-		quotaSpec.MaxObjects = &maxObjectsInt64
-	}
-
-	//MaxSize is modified
-	if maxSize != "" && (maxSizeInt64 != *objectUser.UserQuota.MaxSize) {
-		quotaSpec.MaxSize = &maxSizeInt64
-	}
-	err = p.adminOpsClient.SetUserQuota(context.TODO(), quotaSpec)
-	if err != nil {
-		return errors.Wrapf(err, "failed to update quota to user %q", p.cephUserName)
-	}
-
-	return nil
-}
-
-// Update is sent when only there is modification to AdditionalConfig field in OBC
-func (p Provisioner) Update(ob *bktv1alpha1.ObjectBucket) error {
-	logger.Debugf("Update event for OB: %+v", ob)
-
-	err := p.initializeDeleteOrRevoke(ob)
-	if err != nil {
-		return err
-	}
-
-	return p.updateAdditionalSettings(ob)
-}
diff --git a/pkg/operator/ceph/object/bucket/provisioner_test.go 
b/pkg/operator/ceph/object/bucket/provisioner_test.go deleted file mode 100644 index ff18b3f3f..000000000 --- a/pkg/operator/ceph/object/bucket/provisioner_test.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package bucket - -import ( - "context" - "fmt" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestPopulateDomainAndPort(t *testing.T) { - ctx := context.TODO() - store := "test-store" - namespace := "ns" - clusterInfo := client.AdminClusterInfo(namespace) - p := NewProvisioner(&clusterd.Context{RookClientset: rookclient.NewSimpleClientset(), Clientset: test.New(t, 1)}, clusterInfo) - p.objectContext = object.NewContext(p.context, clusterInfo, store) - sc := &storagev1.StorageClass{ - Parameters: map[string]string{ - "foo": "bar", - }, - } - - // No endpoint and no CephObjectStore - err := p.populateDomainAndPort(sc) - assert.Error(t, err) - - // Endpoint is set but port is missing - sc.Parameters["endpoint"] = "192.168.0.1" - err = p.populateDomainAndPort(sc) - assert.Error(t, err) - - // Endpoint is set but IP is missing - sc.Parameters["endpoint"] = ":80" - err = p.populateDomainAndPort(sc) - assert.Error(t, err) - - // Endpoint is correct - sc.Parameters["endpoint"] = "192.168.0.1:80" - err = p.populateDomainAndPort(sc) - assert.NoError(t, err) - assert.Equal(t, "192.168.0.1", p.storeDomainName) - assert.Equal(t, int32(80), p.storePort) - - // No endpoint but a CephObjectStore - sc.Parameters["endpoint"] = "" - sc.Parameters["objectStoreNamespace"] = namespace - sc.Parameters["objectStoreName"] = store - cephObjectStore := &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: store, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectStore"}, - Spec: cephv1.ObjectStoreSpec{ - Gateway: cephv1.GatewaySpec{ - Port: int32(80), - }, - }, - } - svc := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", object.AppName, store), - Namespace: namespace, - }, - Spec: v1.ServiceSpec{ - ClusterIP: "192.168.0.1", - Ports: []v1.ServicePort{{Name: "port", Port: int32(80)}}, - }, - } - - _, err = p.context.RookClientset.CephV1().CephObjectStores(namespace).Create(ctx, cephObjectStore, metav1.CreateOptions{}) - assert.NoError(t, err) - _, err = p.context.Clientset.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{}) - assert.NoError(t, err) - p.objectStoreName = store - err = p.populateDomainAndPort(sc) - assert.NoError(t, err) - assert.Equal(t, "rook-ceph-rgw-test-store.ns.svc", p.storeDomainName) -} - -func TestMaxSizeToInt64(t 
*testing.T) { - type args struct { - maxSize string - } - tests := []struct { - name string - args args - want int64 - wantErr bool - }{ - {"invalid size", args{maxSize: "foo"}, 0, true}, - {"2gb size is invalid", args{maxSize: "2g"}, 0, true}, - {"2G size is valid", args{maxSize: "2G"}, 2000000000, false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := maxSizeToInt64(tt.args.maxSize) - if (err != nil) != tt.wantErr { - t.Errorf("maxSizeToInt64() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("maxSizeToInt64() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/operator/ceph/object/bucket/rgw-handlers.go b/pkg/operator/ceph/object/bucket/rgw-handlers.go deleted file mode 100644 index b43b14171..000000000 --- a/pkg/operator/ceph/object/bucket/rgw-handlers.go +++ /dev/null @@ -1,121 +0,0 @@ -package bucket - -import ( - "context" - "fmt" - - "github.com/ceph/go-ceph/rgw/admin" - "github.com/pkg/errors" -) - -func (p *Provisioner) bucketExists(name string) (bool, error) { - _, err := p.adminOpsClient.GetBucketInfo(context.TODO(), admin.Bucket{Bucket: name}) - if err != nil { - if errors.Is(err, admin.ErrNoSuchBucket) { - return false, nil - } - return false, errors.Wrapf(err, "failed to get ceph bucket %q", name) - } - return true, nil -} - -func (p *Provisioner) userExists(name string) (bool, error) { - _, err := p.adminOpsClient.GetUser(context.TODO(), admin.User{ID: name}) - if err != nil { - if errors.Is(err, admin.ErrNoSuchUser) { - return false, nil - } else { - return false, errors.Wrapf(err, "failed to get ceph user %q", name) - } - } - - return true, nil -} - -// Create a Ceph user based on the passed-in name or a generated name. Return the -// accessKeys and set user name and keys in receiver. -func (p *Provisioner) createCephUser(username string) (accKey string, secKey string, err error) { - if len(username) == 0 { - username, err = p.genUserName() - if len(username) == 0 || err != nil { - return "", "", errors.Wrap(err, "no user name provided and unable to generate a unique name") - } - } - p.cephUserName = username - - logger.Infof("creating Ceph user %q", username) - userConfig := admin.User{ - ID: username, - DisplayName: p.cephUserName, - } - - var u admin.User - u, err = p.adminOpsClient.GetUser(context.TODO(), userConfig) - if err != nil { - if errors.Is(err, admin.ErrNoSuchUser) { - u, err = p.adminOpsClient.CreateUser(context.TODO(), userConfig) - if err != nil { - return "", "", errors.Wrapf(err, "failed to create ceph object user %v", userConfig.ID) - } - } else { - return "", "", errors.Wrapf(err, "failed to get ceph user %q", username) - } - } - - logger.Infof("successfully created Ceph user %q with access keys", username) - return u.Keys[0].AccessKey, u.Keys[0].SecretKey, nil -} - -// returns "" if unable to generate a unique name. -func (p *Provisioner) genUserName() (genName string, err error) { - const ( - maxTries = 10 - prefix = "ceph-user" - ) - - notUnique := true - // generate names and check that the user does not already exist. otherwise, - // radosgw-admin will just return the existing user. 
- // when notUnique == true, the loop breaks and `name` contains the latest generated name - for i := 0; notUnique && i < maxTries; i++ { - genName = fmt.Sprintf("%s-%s", prefix, randomString(genUserLen)) - if notUnique, err = p.userExists(genName); err != nil { - return "", err - } - } - return genName, nil -} - -// Delete the user and bucket created by OBC with help of radosgw-admin commands -// If delete user failed, error is no longer returned since its permission is -// already revoked and hence user is no longer able to access the bucket -// Empty string is passed for bucketName only if user needs to be removed, ex Revoke() -func (p *Provisioner) deleteOBCResource(bucketName string) error { - - logger.Infof("deleting Ceph user %q and bucket %q", p.cephUserName, bucketName) - if len(bucketName) > 0 { - // delete bucket with purge option to remove all objects - thePurge := true - err := p.adminOpsClient.RemoveBucket(context.TODO(), admin.Bucket{Bucket: bucketName, PurgeObject: &thePurge}) - if err == nil { - logger.Infof("bucket %q successfully deleted", p.bucketName) - } else if errors.Is(err, admin.ErrNoSuchBucket) { - // opinion: "not found" is not an error - logger.Infof("bucket %q does not exist", p.bucketName) - } else { - return errors.Wrapf(err, "failed to delete bucket %q", bucketName) - } - } - if len(p.cephUserName) > 0 { - err := p.adminOpsClient.RemoveUser(context.TODO(), admin.User{ID: p.cephUserName}) - if err != nil { - if errors.Is(err, admin.ErrNoSuchUser) { - logger.Warningf("user %q does not exist, nothing to delete. %v", p.cephUserName, err) - } - logger.Warningf("failed to delete user %q. %v", p.cephUserName, err) - } else { - logger.Infof("user %q successfully deleted", p.cephUserName) - } - } - return nil -} diff --git a/pkg/operator/ceph/object/bucket/util.go b/pkg/operator/ceph/object/bucket/util.go deleted file mode 100644 index 9fd4ed6ef..000000000 --- a/pkg/operator/ceph/object/bucket/util.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package bucket - -import ( - "context" - "crypto/rand" - - "github.com/coreos/pkg/capnslog" - bktv1alpha1 "github.com/kube-object-storage/lib-bucket-provisioner/pkg/apis/objectbucket.io/v1alpha1" - "github.com/kube-object-storage/lib-bucket-provisioner/pkg/provisioner" - "github.com/pkg/errors" - storagev1 "k8s.io/api/storage/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - cephObject "github.com/rook/rook/pkg/operator/ceph/object" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-bucket-prov") - -const ( - genUserLen = 8 - cephUser = "cephUser" - objectStoreName = "objectStoreName" - objectStoreNamespace = "objectStoreNamespace" - objectStoreEndpoint = "endpoint" -) - -func NewBucketController(cfg *rest.Config, p *Provisioner) (*provisioner.Provisioner, error) { - const allNamespaces = "" - provName := cephObject.GetObjectBucketProvisioner(p.context, p.clusterInfo.Namespace) - - logger.Infof("ceph bucket provisioner launched watching for provisioner %q", provName) - return provisioner.NewProvisioner(cfg, provName, p, allNamespaces) -} - -func getObjectStoreName(sc *storagev1.StorageClass) string { - return sc.Parameters[objectStoreName] -} - -func getObjectStoreNameSpace(sc *storagev1.StorageClass) string { - return sc.Parameters[objectStoreNamespace] -} - -func getObjectStoreEndpoint(sc *storagev1.StorageClass) string { - return sc.Parameters[objectStoreEndpoint] -} - -func getBucketName(ob *bktv1alpha1.ObjectBucket) string { - return ob.Spec.Endpoint.BucketName -} - -func isStaticBucket(sc *storagev1.StorageClass) (string, bool) { - const key = "bucketName" - val, ok := sc.Parameters[key] - return val, ok -} - -func getCephUser(ob *bktv1alpha1.ObjectBucket) string { - return ob.Spec.AdditionalState[cephUser] -} - -func (p *Provisioner) getObjectStore() (*cephv1.CephObjectStore, error) { - ctx := context.TODO() - // Verify the object store API object actually exists - store, err := p.context.RookClientset.CephV1().CephObjectStores(p.clusterInfo.Namespace).Get(ctx, p.objectStoreName, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - return nil, errors.Wrap(err, "cephObjectStore not found") - } - return nil, errors.Wrapf(err, "failed to get ceph object store %q", p.objectStoreName) - } - return store, err -} - -func (p *Provisioner) getCephCluster() (*cephv1.CephCluster, error) { - cephCluster, err := p.context.RookClientset.CephV1().CephClusters(p.clusterInfo.Namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "failed to list ceph clusters in namespace %q", p.clusterInfo.Namespace) - } - if len(cephCluster.Items) == 0 { - return nil, errors.Errorf("failed to find ceph cluster in namespace %q", p.clusterInfo.Namespace) - } - - // This is a bit weak, but there will always be a single cluster per namespace anyway - return &cephCluster.Items[0], err -} - -func randomString(n int) string { - - var letterRunes = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - b := make([]byte, n) - if _, err := rand.Read(b); err != nil { - return "" - } - for k, v := range b { - b[k] = letterRunes[v%byte(len(letterRunes))] - } - return string(b) -} - -func MaxObjectQuota(AdditionalConfig map[string]string) string { - return AdditionalConfig["maxObjects"] -} - -func MaxSizeQuota(AdditionalConfig map[string]string) string { - return 
AdditionalConfig["maxSize"] -} diff --git a/pkg/operator/ceph/object/config.go b/pkg/operator/ceph/object/config.go deleted file mode 100644 index 6f2456e18..000000000 --- a/pkg/operator/ceph/object/config.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "fmt" - "path" - "strconv" - "strings" - "time" - - "github.com/pkg/errors" - cephconfig "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/config/keyring" - v1 "k8s.io/api/core/v1" -) - -const ( - keyringTemplate = ` -[%s] -key = %s -caps mon = "allow rw" -caps osd = "allow rwx" -` - - caBundleVolumeName = "rook-ceph-custom-ca-bundle" - caBundleUpdatedVolumeName = "rook-ceph-ca-bundle-updated" - caBundleTrustedDir = "/etc/pki/ca-trust/" - caBundleSourceCustomDir = caBundleTrustedDir + "source/anchors/" - caBundleExtractedDir = caBundleTrustedDir + "extracted/" - caBundleKeyName = "cabundle" - caBundleFileName = "custom-ca-bundle.crt" - certVolumeName = "rook-ceph-rgw-cert" - certDir = "/etc/ceph/private" - certKeyName = "cert" - certFilename = "rgw-cert.pem" - certKeyFileName = "rgw-key.pem" - rgwPortInternalPort int32 = 8080 - ServiceServingCertCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" - HttpTimeOut = time.Second * 15 -) - -var ( - rgwFrontendName = "beast" -) - -func (c *clusterConfig) portString() string { - var portString string - - port := c.store.Spec.Gateway.Port - if port != 0 { - if !c.clusterSpec.Network.IsHost() { - port = rgwPortInternalPort - } - portString = fmt.Sprintf("port=%s", strconv.Itoa(int(port))) - } - if c.store.Spec.IsTLSEnabled() { - certPath := path.Join(certDir, certFilename) - // This is the beast backend - // Config is: http://docs.ceph.com/docs/master/radosgw/frontends/#id3 - if port != 0 { - portString = fmt.Sprintf("%s ssl_port=%d ssl_certificate=%s", - portString, c.store.Spec.Gateway.SecurePort, certPath) - } else { - portString = fmt.Sprintf("ssl_port=%d ssl_certificate=%s", - c.store.Spec.Gateway.SecurePort, certPath) - } - secretType, _ := c.rgwTLSSecretType(c.store.Spec.Gateway.SSLCertificateRef) - if c.store.Spec.GetServiceServingCert() != "" || secretType == v1.SecretTypeTLS { - privateKey := path.Join(certDir, certKeyFileName) - portString = fmt.Sprintf("%s ssl_private_key=%s", portString, privateKey) - } - } - return portString -} - -func generateCephXUser(name string) string { - user := strings.TrimPrefix(name, AppName) - return "client.rgw" + strings.Replace(user, "-", ".", -1) -} - -func (c *clusterConfig) generateKeyring(rgwConfig *rgwConfig) (string, error) { - user := generateCephXUser(rgwConfig.ResourceName) - /* TODO: this says `osd allow rwx` while template says `osd allow *`; which is correct? 
*/ - access := []string{"osd", "allow rwx", "mon", "allow rw"} - s := keyring.GetSecretStore(c.context, c.clusterInfo, c.ownerInfo) - - key, err := s.GenerateKey(user, access) - if err != nil { - return "", err - } - - keyring := fmt.Sprintf(keyringTemplate, user, key) - return keyring, s.CreateOrUpdate(rgwConfig.ResourceName, keyring) -} - -func (c *clusterConfig) setDefaultFlagsMonConfigStore(rgwName string) error { - monStore := cephconfig.GetMonStore(c.context, c.clusterInfo) - who := generateCephXUser(rgwName) - configOptions := make(map[string]string) - - configOptions["rgw_log_nonexistent_bucket"] = "true" - configOptions["rgw_log_object_name_utc"] = "true" - configOptions["rgw_enable_usage_log"] = "true" - configOptions["rgw_zone"] = c.store.Name - configOptions["rgw_zonegroup"] = c.store.Name - - for flag, val := range configOptions { - err := monStore.Set(who, flag, val) - if err != nil { - return errors.Wrapf(err, "failed to set %q to %q on %q", flag, val, who) - } - } - - return nil -} - -func (c *clusterConfig) deleteFlagsMonConfigStore(rgwName string) error { - monStore := cephconfig.GetMonStore(c.context, c.clusterInfo) - who := generateCephXUser(rgwName) - err := monStore.DeleteDaemon(who) - if err != nil { - return errors.Wrapf(err, "failed to delete rgw config for %q in mon configuration database", who) - } - - logger.Infof("successfully deleted rgw config for %q in mon configuration database", who) - return nil -} diff --git a/pkg/operator/ceph/object/config_test.go b/pkg/operator/ceph/object/config_test.go deleted file mode 100644 index 478df1d1d..000000000 --- a/pkg/operator/ceph/object/config_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package object - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" -) - -func newConfig(t *testing.T) *clusterConfig { - clusterInfo := &cephclient.ClusterInfo{ - CephVersion: cephver.Nautilus, - } - clusterSpec := &cephv1.ClusterSpec{ - Network: cephv1.NetworkSpec{ - HostNetwork: false, - }, - } - return &clusterConfig{ - store: &cephv1.CephObjectStore{ - Spec: cephv1.ObjectStoreSpec{ - Gateway: cephv1.GatewaySpec{}, - }}, - clusterInfo: clusterInfo, - clusterSpec: clusterSpec, - context: &clusterd.Context{Clientset: test.New(t, 3)}, - } -} - -func TestPortString(t *testing.T) { - // No port or secure port on beast - cfg := newConfig(t) - result := cfg.portString() - assert.Equal(t, "", result) - - // Insecure port on beast - cfg = newConfig(t) - // Set host networking - cfg.clusterSpec.Network.HostNetwork = true - cfg.store.Spec.Gateway.Port = 80 - result = cfg.portString() - assert.Equal(t, "port=80", result) - - // Secure port on beast - cfg = newConfig(t) - cfg.store.Spec.Gateway.SecurePort = 443 - cfg.store.Spec.Gateway.SSLCertificateRef = "some-k8s-key-secret" - result = cfg.portString() - assert.Equal(t, "ssl_port=443 ssl_certificate=/etc/ceph/private/rgw-cert.pem", result) - - // Both ports on beast - cfg = newConfig(t) - // Set host networking - cfg.clusterSpec.Network.HostNetwork = true - cfg.store.Spec.Gateway.Port = 80 - cfg.store.Spec.Gateway.SecurePort = 443 - cfg.store.Spec.Gateway.SSLCertificateRef = "some-k8s-key-secret" - result = cfg.portString() - assert.Equal(t, "port=80 ssl_port=443 ssl_certificate=/etc/ceph/private/rgw-cert.pem", result) - - // Secure port requires the cert on beast - cfg = newConfig(t) - cfg.store.Spec.Gateway.SecurePort = 443 - result = cfg.portString() - assert.Equal(t, "", result) - - // Using SDN, no host networking so the rgw port internal is not the same - cfg = newConfig(t) - cfg.store.Spec.Gateway.Port = 80 - result = cfg.portString() - assert.Equal(t, "port=8080", result) -} - -func TestGenerateCephXUser(t *testing.T) { - fakeUser := generateCephXUser("rook-ceph-rgw-fake-store-fake-user") - assert.Equal(t, "client.rgw.fake.store.fake.user", fakeUser) -} diff --git a/pkg/operator/ceph/object/controller.go b/pkg/operator/ceph/object/controller.go deleted file mode 100644 index f4c8306aa..000000000 --- a/pkg/operator/ceph/object/controller.go +++ /dev/null @@ -1,528 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package object - -import ( - "context" - "fmt" - "reflect" - "strings" - "syscall" - "time" - - "github.com/coreos/pkg/capnslog" - bktclient "github.com/kube-object-storage/lib-bucket-provisioner/pkg/client/clientset/versioned" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opconfig "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - controllerName = "ceph-object-controller" -) - -var waitForRequeueIfObjectStoreNotReady = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -// List of object resources to watch by the controller -var objectsToWatch = []client.Object{ - &corev1.Secret{TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: corev1.SchemeGroupVersion.String()}}, - &corev1.Service{TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: corev1.SchemeGroupVersion.String()}}, - &appsv1.Deployment{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.String()}}, -} - -var cephObjectStoreKind = reflect.TypeOf(cephv1.CephObjectStore{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephObjectStoreKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileCephObjectStore reconciles a cephObjectStore object -type ReconcileCephObjectStore struct { - client client.Client - bktclient bktclient.Interface - scheme *runtime.Scheme - context *clusterd.Context - clusterSpec *cephv1.ClusterSpec - clusterInfo *cephclient.ClusterInfo - objectStoreChannels map[string]*objectStoreHealth - recorder *k8sutil.EventReporter -} - -type objectStoreHealth struct { - stopChan chan struct{} - monitoringRunning bool -} - -// Add creates a new cephObjectStore Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - - context.Client = mgr.GetClient() - return &ReconcileCephObjectStore{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - bktclient: bktclient.NewForConfigOrDie(context.KubeConfig), - objectStoreChannels: make(map[string]*objectStoreHealth), - recorder: k8sutil.NewEventReporter(mgr.GetEventRecorderFor("rook-" + controllerName)), - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the cephObjectStore CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephObjectStore{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - // Watch all other resources - for _, t := range objectsToWatch { - err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephObjectStore{}, - }, opcontroller.WatchPredicateForNonCRDObject(&cephv1.CephObjectStore{TypeMeta: controllerTypeMeta}, mgr.GetScheme())) - if err != nil { - return err - } - } - - // Build Handler function to return the list of ceph object - // This is used by the watchers below - handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephObjectStoreList{}, mgr.GetScheme()) - if err != nil { - return err - } - - // Watch for CephCluster Spec changes that we want to propagate to us - err = c.Watch(&source.Kind{Type: &cephv1.CephCluster{ - TypeMeta: metav1.TypeMeta{ - Kind: opcontroller.ClusterResource.Kind, - APIVersion: opcontroller.ClusterResource.APIVersion, - }, - }, - }, handler.EnqueueRequestsFromMapFunc(handlerFunc), opcontroller.WatchCephClusterPredicate()) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a cephObjectStore object and makes changes based on the state read -// and what is in the cephObjectStore.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileCephObjectStore) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, objectStore, err := r.reconcile(request) - - return reporting.ReportReconcileResult(logger, r.recorder, objectStore, reconcileResponse, err) -} - -func (r *ReconcileCephObjectStore) reconcile(request reconcile.Request) (reconcile.Result, *cephv1.CephObjectStore, error) { - // Fetch the cephObjectStore instance - cephObjectStore := &cephv1.CephObjectStore{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephObjectStore) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("cephObjectStore resource not found. 
Ignoring since object must be deleted.") - return reconcile.Result{}, cephObjectStore, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, cephObjectStore, errors.Wrap(err, "failed to get cephObjectStore") - } - - // Set a finalizer so we can do cleanup before the object goes away - err = opcontroller.AddFinalizerIfNotPresent(r.client, cephObjectStore) - if err != nil { - return reconcile.Result{}, cephObjectStore, errors.Wrap(err, "failed to add finalizer") - } - - // The CR was just created, initializing status fields - if cephObjectStore.Status == nil { - // The store is not available so let's not build the status Info yet - updateStatus(r.client, request.NamespacedName, cephv1.ConditionProgressing, map[string]string{}) - } - - // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - // This handles the case where the Ceph Cluster is gone and we want to delete that CR - // We skip the deleteStore() function since everything is gone already - // - // Also, only remove the finalizer if the CephCluster is gone - // If not, we should wait for it to be ready - // This handles the case where the operator is not ready to accept Ceph command but the cluster exists - if !cephObjectStore.GetDeletionTimestamp().IsZero() && !cephClusterExists { - // Remove finalizer - err := opcontroller.RemoveFinalizer(r.client, cephObjectStore) - if err != nil { - return reconcile.Result{}, cephObjectStore, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, cephObjectStore, nil - } - - return reconcileResponse, cephObjectStore, nil - } - r.clusterSpec = &cephCluster.Spec - - // Initialize the channel for this object store - // This allows us to track multiple ObjectStores in the same namespace - _, ok := r.objectStoreChannels[cephObjectStore.Name] - if !ok { - r.objectStoreChannels[cephObjectStore.Name] = &objectStoreHealth{ - stopChan: make(chan struct{}), - monitoringRunning: false, - } - } - - // Populate clusterInfo during each reconcile - r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return reconcile.Result{}, cephObjectStore, errors.Wrap(err, "failed to populate cluster info") - } - - // Populate CephVersion - currentCephVersion, err := cephclient.LeastUptodateDaemonVersion(r.context, r.clusterInfo, opconfig.MonType) - if err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, cephObjectStore, nil - } - return reconcile.Result{}, cephObjectStore, errors.Wrapf(err, "failed to retrieve current ceph %q version", opconfig.MonType) - } - r.clusterInfo.CephVersion = currentCephVersion - - // DELETE: the CR was deleted - if !cephObjectStore.GetDeletionTimestamp().IsZero() { - updateStatus(r.client, request.NamespacedName, cephv1.ConditionDeleting, buildStatusInfo(cephObjectStore)) - - if ok { - select { - case <-r.objectStoreChannels[cephObjectStore.Name].stopChan: - // channel was closed - break - default: - // Close the channel to stop the healthcheck of the endpoint - close(r.objectStoreChannels[cephObjectStore.Name].stopChan) - } - - // get the latest version of the object now 
that the health checker is stopped - err := r.client.Get(context.TODO(), request.NamespacedName, cephObjectStore) - if err != nil { - return reconcile.Result{}, cephObjectStore, errors.Wrapf(err, "failed to get latest CephObjectStore %q", request.NamespacedName.String()) - } - - objCtx, err := NewMultisiteContext(r.context, r.clusterInfo, cephObjectStore) - if err != nil { - return reconcile.Result{}, cephObjectStore, errors.Wrapf(err, "failed to check for object buckets. failed to get object context") - } - - opsCtx, err := NewMultisiteAdminOpsContext(objCtx, &cephObjectStore.Spec) - if err != nil { - return reconcile.Result{}, cephObjectStore, errors.Wrapf(err, "failed to check for object buckets. failed to get admin ops API context") - } - - deps, err := CephObjectStoreDependents(r.context, r.clusterInfo, cephObjectStore, objCtx, opsCtx) - if err != nil { - return reconcile.Result{}, cephObjectStore, err - } - if !deps.Empty() { - err := reporting.ReportDeletionBlockedDueToDependents(logger, r.client, cephObjectStore, deps) - return opcontroller.WaitForRequeueIfFinalizerBlocked, cephObjectStore, err - } - reporting.ReportDeletionNotBlockedDueToDependents(logger, r.client, r.recorder, cephObjectStore) - - cfg := clusterConfig{ - context: r.context, - store: cephObjectStore, - clusterSpec: r.clusterSpec, - clusterInfo: r.clusterInfo, - } - cfg.deleteStore() - - // Remove object store from the map - delete(r.objectStoreChannels, cephObjectStore.Name) - } - - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephObjectStore) - if err != nil { - return reconcile.Result{}, cephObjectStore, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, cephObjectStore, nil - } - - // validate the store settings - if err := r.validateStore(cephObjectStore); err != nil { - return reconcile.Result{}, cephObjectStore, errors.Wrapf(err, "invalid object store %q arguments", cephObjectStore.Name) - } - - // If the CephCluster has enabled the "pg_autoscaler" module and is running Nautilus - // we force the pg_autoscale_mode to "on" - _, propertyExists := cephObjectStore.Spec.DataPool.Parameters[cephclient.PgAutoscaleModeProperty] - if mgr.IsModuleInSpec(cephCluster.Spec.Mgr.Modules, mgr.PgautoscalerModuleName) && - !currentCephVersion.IsAtLeastOctopus() && - !propertyExists { - if len(cephObjectStore.Spec.DataPool.Parameters) == 0 { - cephObjectStore.Spec.DataPool.Parameters = make(map[string]string) - } - cephObjectStore.Spec.DataPool.Parameters[cephclient.PgAutoscaleModeProperty] = cephclient.PgAutoscaleModeOn - } - - // CREATE/UPDATE - _, err = r.reconcileCreateObjectStore(cephObjectStore, request.NamespacedName, cephCluster.Spec) - if err != nil && kerrors.IsNotFound(err) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, cephObjectStore, nil - } else if err != nil { - result, err := r.setFailedStatus(request.NamespacedName, "failed to create object store deployments", err) - return result, cephObjectStore, err - } - - // Set Progressing status, we are done reconciling, the health check go routine will update the status - updateStatus(r.client, request.NamespacedName, cephv1.ConditionProgressing, buildStatusInfo(cephObjectStore)) - - // Return and do not requeue - logger.Debug("done reconciling") - return reconcile.Result{}, cephObjectStore, nil -} - -func (r *ReconcileCephObjectStore) reconcileCreateObjectStore(cephObjectStore 
*cephv1.CephObjectStore, namespacedName types.NamespacedName, cluster cephv1.ClusterSpec) (reconcile.Result, error) { - ownerInfo := k8sutil.NewOwnerInfo(cephObjectStore, r.scheme) - cfg := clusterConfig{ - context: r.context, - clusterInfo: r.clusterInfo, - store: cephObjectStore, - rookVersion: r.clusterSpec.CephVersion.Image, - clusterSpec: r.clusterSpec, - DataPathMap: opconfig.NewStatelessDaemonDataPathMap(opconfig.RgwType, cephObjectStore.Name, cephObjectStore.Namespace, r.clusterSpec.DataDirHostPath), - client: r.client, - ownerInfo: ownerInfo, - } - objContext := NewContext(r.context, r.clusterInfo, cephObjectStore.Name) - objContext.UID = string(cephObjectStore.UID) - objContext.CephClusterSpec = cluster - - var err error - - if cephObjectStore.Spec.IsExternal() { - logger.Info("reconciling external object store") - - // RECONCILE SERVICE - logger.Info("reconciling object store service") - _, err = cfg.reconcileService(cephObjectStore) - if err != nil { - return r.setFailedStatus(namespacedName, "failed to reconcile service", err) - } - - // RECONCILE ENDPOINTS - // Always add the endpoint AFTER the service otherwise it will get overridden - logger.Info("reconciling external object store endpoint") - err = cfg.reconcileExternalEndpoint(cephObjectStore) - if err != nil { - return r.setFailedStatus(namespacedName, "failed to reconcile external endpoint", err) - } - } else { - logger.Info("reconciling object store deployments") - - // Reconcile realm/zonegroup/zone CRs & update their names - realmName, zoneGroupName, zoneName, reconcileResponse, err := r.reconcileMultisiteCRs(cephObjectStore) - if err != nil { - return reconcileResponse, err - } - - // Reconcile Ceph Zone if Multisite - if cephObjectStore.Spec.IsMultisite() { - reconcileResponse, err := r.reconcileCephZone(cephObjectStore, zoneGroupName, realmName) - if err != nil { - return reconcileResponse, err - } - } - - objContext.Realm = realmName - objContext.ZoneGroup = zoneGroupName - objContext.Zone = zoneName - logger.Debugf("realm for object-store is %q, zone group for object-store is %q, zone for object-store is %q", objContext.Realm, objContext.ZoneGroup, objContext.Zone) - - // RECONCILE SERVICE - logger.Debug("reconciling object store service") - serviceIP, err := cfg.reconcileService(cephObjectStore) - if err != nil { - return r.setFailedStatus(namespacedName, "failed to reconcile service", err) - } - - if err := UpdateEndpoint(objContext, &cephObjectStore.Spec); err != nil { - return r.setFailedStatus(namespacedName, "failed to set endpoint", err) - } - - // Reconcile Pool Creation - if !cephObjectStore.Spec.IsMultisite() { - logger.Info("reconciling object store pools") - err = CreatePools(objContext, r.clusterSpec, cephObjectStore.Spec.MetadataPool, cephObjectStore.Spec.DataPool) - if err != nil { - return r.setFailedStatus(namespacedName, "failed to create object pools", err) - } - } - - // Reconcile Multisite Creation - logger.Infof("setting multisite settings for object store %q", cephObjectStore.Name) - err = setMultisite(objContext, cephObjectStore, serviceIP) - if err != nil && kerrors.IsNotFound(err) { - return reconcile.Result{}, err - } else if err != nil { - return r.setFailedStatus(namespacedName, "failed to configure multisite for object store", err) - } - - // Create or Update Store - err = cfg.createOrUpdateStore(realmName, zoneGroupName, zoneName) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to create object store %q", cephObjectStore.Name) - } - } - - // Start 
monitoring - if !cephObjectStore.Spec.HealthCheck.Bucket.Disabled { - r.startMonitoring(cephObjectStore, objContext, namespacedName) - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileCephObjectStore) reconcileCephZone(store *cephv1.CephObjectStore, zoneGroupName string, realmName string) (reconcile.Result, error) { - realmArg := fmt.Sprintf("--rgw-realm=%s", realmName) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", zoneGroupName) - zoneArg := fmt.Sprintf("--rgw-zone=%s", store.Spec.Zone.Name) - objContext := NewContext(r.context, r.clusterInfo, store.Name) - - _, err := RunAdminCommandNoMultisite(objContext, true, "zone", "get", realmArg, zoneGroupArg, zoneArg) - if err != nil { - // ENOENT mean “No such file or directory” - if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) { - return waitForRequeueIfObjectStoreNotReady, errors.Wrapf(err, "ceph zone %q not found", store.Spec.Zone.Name) - } else { - return waitForRequeueIfObjectStoreNotReady, errors.Wrapf(err, "radosgw-admin zone get failed with code %d", code) - } - } - - logger.Infof("Zone %q found in Ceph cluster will include object store %q", store.Spec.Zone.Name, store.Name) - return reconcile.Result{}, nil -} - -func (r *ReconcileCephObjectStore) reconcileMultisiteCRs(cephObjectStore *cephv1.CephObjectStore) (string, string, string, reconcile.Result, error) { - if cephObjectStore.Spec.IsMultisite() { - zoneName := cephObjectStore.Spec.Zone.Name - zone := &cephv1.CephObjectZone{} - err := r.client.Get(context.TODO(), types.NamespacedName{Name: zoneName, Namespace: cephObjectStore.Namespace}, zone) - if err != nil { - if kerrors.IsNotFound(err) { - return "", "", "", waitForRequeueIfObjectStoreNotReady, err - } - return "", "", "", waitForRequeueIfObjectStoreNotReady, errors.Wrapf(err, "error getting CephObjectZone %q", cephObjectStore.Spec.Zone.Name) - } - logger.Debugf("CephObjectZone resource %s found", zone.Name) - - zonegroup := &cephv1.CephObjectZoneGroup{} - err = r.client.Get(context.TODO(), types.NamespacedName{Name: zone.Spec.ZoneGroup, Namespace: cephObjectStore.Namespace}, zonegroup) - if err != nil { - if kerrors.IsNotFound(err) { - return "", "", "", waitForRequeueIfObjectStoreNotReady, err - } - return "", "", "", waitForRequeueIfObjectStoreNotReady, errors.Wrapf(err, "error getting CephObjectZoneGroup %q", zone.Spec.ZoneGroup) - } - logger.Debugf("CephObjectZoneGroup resource %s found", zonegroup.Name) - - realm := &cephv1.CephObjectRealm{} - err = r.client.Get(context.TODO(), types.NamespacedName{Name: zonegroup.Spec.Realm, Namespace: cephObjectStore.Namespace}, realm) - if err != nil { - if kerrors.IsNotFound(err) { - return "", "", "", waitForRequeueIfObjectStoreNotReady, err - } - return "", "", "", waitForRequeueIfObjectStoreNotReady, errors.Wrapf(err, "error getting CephObjectRealm %q", zonegroup.Spec.Realm) - } - logger.Debugf("CephObjectRealm resource %s found", realm.Name) - - return realm.Name, zonegroup.Name, zone.Name, reconcile.Result{}, nil - } - - return cephObjectStore.Name, cephObjectStore.Name, cephObjectStore.Name, reconcile.Result{}, nil -} - -func (r *ReconcileCephObjectStore) startMonitoring(objectstore *cephv1.CephObjectStore, objContext *Context, namespacedName types.NamespacedName) { - // Start monitoring object store - if r.objectStoreChannels[objectstore.Name].monitoringRunning { - logger.Debug("external rgw endpoint monitoring go routine already running!") - return - } - - rgwChecker, err := newBucketChecker(r.context, objContext, r.client, 
namespacedName, &objectstore.Spec) - if err != nil { - logger.Error(err) - return - } - - logger.Info("starting rgw healthcheck") - go rgwChecker.checkObjectStore(r.objectStoreChannels[objectstore.Name].stopChan) - - // Set the monitoring flag so we don't start more than one go routine - r.objectStoreChannels[objectstore.Name].monitoringRunning = true -} diff --git a/pkg/operator/ceph/object/controller_test.go b/pkg/operator/ceph/object/controller_test.go deleted file mode 100644 index 25a0e3a86..000000000 --- a/pkg/operator/ceph/object/controller_test.go +++ /dev/null @@ -1,657 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package rgw to manage a rook object store. -package object - -import ( - "context" - "os" - "testing" - "time" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - realmListJSON = `{ - "default_info": "237e6250-5f7d-4b85-9359-8cb2b1848507", - "realms": [ - "my-store" - ] - }` - realmGetJSON = `{ - "id": "237e6250-5f7d-4b85-9359-8cb2b1848507", - "name": "my-store", - "current_period": "df665ecb-1762-47a9-9c66-f938d251c02a", - "epoch": 2 - }` - zoneGroupGetJSON = `{ - "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", - "name": "my-store", - "api_name": "my-store", - "is_master": "true", - "endpoints": [ - ":80" - ], - "hostnames": [], - "hostnames_s3website": [], - "master_zone": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "zones": [ - { - "id": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "name": "my-store", - "endpoints": [ - ":80" - ], - "log_meta": "false", - "log_data": "false", - "bucket_index_max_shards": 0, - "read_only": "false", - "tier_type": "", - "sync_from_all": "true", - "sync_from": [], - "redirect_zone": "" - } - ], - "placement_targets": [ - { - "name": "default-placement", - "tags": [], - "storage_classes": [ - "STANDARD" - ] - } - ], - "default_placement": "default-placement", - "realm_id": "237e6250-5f7d-4b85-9359-8cb2b1848507" - }` - zoneGetJSON = `{ - "id": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "name": "my-store", - "domain_root": "my-store.rgw.meta:root", - "control_pool": "my-store.rgw.control", - "gc_pool": "my-store.rgw.log:gc", - "lc_pool": "my-store.rgw.log:lc", - "log_pool": "my-store.rgw.log", - "intent_log_pool": "my-store.rgw.log:intent", - "usage_log_pool": "my-store.rgw.log:usage", - "reshard_pool": 
"my-store.rgw.log:reshard", - "user_keys_pool": "my-store.rgw.meta:users.keys", - "user_email_pool": "my-store.rgw.meta:users.email", - "user_swift_pool": "my-store.rgw.meta:users.swift", - "user_uid_pool": "my-store.rgw.meta:users.uid", - "otp_pool": "my-store.rgw.otp", - "system_key": { - "access_key": "", - "secret_key": "" - }, - "placement_pools": [ - { - "key": "default-placement", - "val": { - "index_pool": "my-store.rgw.buckets.index", - "storage_classes": { - "STANDARD": { - "data_pool": "my-store.rgw.buckets.data" - } - }, - "data_extra_pool": "my-store.rgw.buckets.non-ec", - "index_type": 0 - } - } - ], - "metadata_heap": "", - "realm_id": "" - }` - rgwCephAuthGetOrCreateKey = `{"key":"AQCvzWBeIV9lFRAAninzm+8XFxbSfTiPwoX50g=="}` - dummyVersionsRaw = ` - { - "mon": { - "ceph version 14.2.8 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 3 - } - }` - userCreateJSON = `{ - "user_id": "my-user", - "display_name": "my-user", - "email": "", - "suspended": 0, - "max_buckets": 1000, - "subusers": [], - "keys": [ - { - "user": "my-user", - "access_key": "EOE7FYCNOBZJ5VFV909G", - "secret_key": "qmIqpWm8HxCzmynCrD6U6vKWi4hnDBndOnmxXNsV" - } - ], - "swift_keys": [], - "caps": [], - "op_mask": "read, write, delete", - "default_placement": "", - "default_storage_class": "", - "placement_tags": [], - "bucket_quota": { - "enabled": false, - "check_on_raw": false, - "max_size": -1, - "max_size_kb": 0, - "max_objects": -1 - }, - "user_quota": { - "enabled": false, - "check_on_raw": false, - "max_size": -1, - "max_size_kb": 0, - "max_objects": -1 - }, - "temp_url_keys": [], - "type": "rgw", - "mfa_ids": [] -}` - realmListMultisiteJSON = `{ - "default_info": "237e6250-5f7d-4b85-9359-8cb2b1848507", - "realms": [ - "realm-a" - ] - }` - realmGetMultisiteJSON = `{ - "id": "237e6250-5f7d-4b85-9359-8cb2b1848507", - "name": "realm-a", - "current_period": "df665ecb-1762-47a9-9c66-f938d251c02a", - "epoch": 2 - }` - zoneGroupGetMultisiteJSON = `{ - "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", - "name": "zonegroup-a", - "api_name": "zonegroup-a", - "is_master": "true", - "endpoints": [ - ":80" - ], - "hostnames": [], - "hostnames_s3website": [], - "master_zone": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "zones": [ - { - "id": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "name": "zone-a", - "endpoints": [ - ":80" - ], - "log_meta": "false", - "log_data": "false", - "bucket_index_max_shards": 0, - "read_only": "false", - "tier_type": "", - "sync_from_all": "true", - "sync_from": [], - "redirect_zone": "" - } - ], - "placement_targets": [ - { - "name": "default-placement", - "tags": [], - "storage_classes": [ - "STANDARD" - ] - } - ], - "default_placement": "default-placement", - "realm_id": "237e6250-5f7d-4b85-9359-8cb2b1848507" - }` - zoneGetMultisiteJSON = `{ - "id": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "name": "zone-a", - "domain_root": "my-store.rgw.meta:root", - "control_pool": "my-store.rgw.control", - "gc_pool": "my-store.rgw.log:gc", - "lc_pool": "my-store.rgw.log:lc", - "log_pool": "my-store.rgw.log", - "intent_log_pool": "my-store.rgw.log:intent", - "usage_log_pool": "my-store.rgw.log:usage", - "reshard_pool": "my-store.rgw.log:reshard", - "user_keys_pool": "my-store.rgw.meta:users.keys", - "user_email_pool": "my-store.rgw.meta:users.email", - "user_swift_pool": "my-store.rgw.meta:users.swift", - "user_uid_pool": "my-store.rgw.meta:users.uid", - "otp_pool": "my-store.rgw.otp", - "system_key": { - "access_key": "", - "secret_key": "" - }, - "placement_pools": [ - { - "key": 
"default-placement", - "val": { - "index_pool": "my-store.rgw.buckets.index", - "storage_classes": { - "STANDARD": { - "data_pool": "my-store.rgw.buckets.data" - } - }, - "data_extra_pool": "my-store.rgw.buckets.non-ec", - "index_type": 0 - } - } - ], - "metadata_heap": "", - "realm_id": "" - }` -) - -var ( - name = "my-user" - namespace = "rook-ceph" - store = "my-store" -) - -func TestCephObjectStoreController(t *testing.T) { - ctx := context.TODO() - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - // A Pool resource with metadata and spec. - objectStore := &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: store, - Namespace: namespace, - }, - Spec: cephv1.ObjectStoreSpec{}, - TypeMeta: controllerTypeMeta, - } - objectStore.Spec.Gateway.Port = 80 - - // Objects to track in the fake client. - object := []runtime.Object{ - objectStore, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - return "", nil - }, - } - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectStore{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephObjectStore object with the scheme and fake client. - r := &ReconcileCephObjectStore{ - client: cl, - scheme: s, - context: c, - objectStoreChannels: make(map[string]*objectStoreHealth), - recorder: k8sutil.NewEventReporter(record.NewFakeRecorder(5)), - } - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: store, - Namespace: namespace, - }, - } - logger.Info("STARTING PHASE 1") - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 1 DONE") - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephObjectStore object with the scheme and fake client. - r = &ReconcileCephObjectStore{ - client: cl, - scheme: s, - context: c, - objectStoreChannels: make(map[string]*objectStoreHealth), - recorder: k8sutil.NewEventReporter(record.NewFakeRecorder(5)), - } - logger.Info("STARTING PHASE 2") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 2 DONE") - - // - // TEST 3: - // - // SUCCESS! 
The CephCluster is ready - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Override executor with the new ceph status and more content - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return rgwCephAuthGetOrCreateKey, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - if args[0] == "osd" && args[1] == "lspools" { - // ceph actually outputs this all on one line, but this parses the same - return `[ - {"poolnum":1,"poolname":"replicapool"}, - {"poolnum":2,"poolname":"device_health_metrics"}, - {"poolnum":3,"poolname":".rgw.root"}, - {"poolnum":4,"poolname":"my-store.rgw.buckets.index"}, - {"poolnum":5,"poolname":"my-store.rgw.buckets.non-ec"}, - {"poolnum":6,"poolname":"my-store.rgw.log"}, - {"poolnum":7,"poolname":"my-store.rgw.control"}, - {"poolnum":8,"poolname":"my-store.rgw.meta"}, - {"poolnum":9,"poolname":"my-store.rgw.buckets.data"} - ]`, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "realm" && args[1] == "list" { - return realmListJSON, nil - } - if args[0] == "realm" && args[1] == "get" { - return realmGetJSON, nil - } - if args[0] == "zonegroup" && args[1] == "get" { - return zoneGroupGetJSON, nil - } - if args[0] == "zone" && args[1] == "get" { - return zoneGetJSON, nil - } - if args[0] == "user" { - return userCreateJSON, nil - } - return "", nil - }, - } - c.Executor = executor - - // Create a ReconcileCephObjectStore object with the scheme and fake client. 
- r = &ReconcileCephObjectStore{ - client: cl, - scheme: s, - context: c, - objectStoreChannels: make(map[string]*objectStoreHealth), - recorder: k8sutil.NewEventReporter(record.NewFakeRecorder(5)), - } - - logger.Info("STARTING PHASE 3") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, objectStore) - assert.NoError(t, err) - assert.Equal(t, cephv1.ConditionProgressing, objectStore.Status.Phase, objectStore) - assert.NotEmpty(t, objectStore.Status.Info["endpoint"], objectStore) - assert.Equal(t, "http://rook-ceph-rgw-my-store.rook-ceph.svc:80", objectStore.Status.Info["endpoint"], objectStore) - logger.Info("PHASE 3 DONE") -} - -func TestCephObjectStoreControllerMultisite(t *testing.T) { - ctx := context.TODO() - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - zoneName := "zone-a" - zoneGroupName := "zonegroup-a" - realmName := "realm-a" - - metadataPool := cephv1.PoolSpec{} - dataPool := cephv1.PoolSpec{} - - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: k8sutil.ReadyStatus, - CephStatus: &cephv1.CephStatus{ - Health: "HEALTH_OK", - }, - }, - } - - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - - objectZone := &cephv1.CephObjectZone{ - ObjectMeta: metav1.ObjectMeta{ - Name: zoneName, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectZone", - }, - Spec: cephv1.ObjectZoneSpec{ - ZoneGroup: zoneGroupName, - MetadataPool: metadataPool, - DataPool: dataPool, - }, - } - - objectZoneGroup := &cephv1.CephObjectZoneGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: zoneGroupName, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectZoneGroup", - }, - Spec: cephv1.ObjectZoneGroupSpec{}, - } - - objectZoneGroup.Spec.Realm = realmName - - objectRealm := &cephv1.CephObjectRealm{ - ObjectMeta: metav1.ObjectMeta{ - Name: realmName, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectRealm", - }, - Spec: cephv1.ObjectRealmSpec{}, - } - - objectStore := &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: store, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectStore", - }, - Spec: cephv1.ObjectStoreSpec{}, - } - - objectStore.Spec.Zone.Name = zoneName - objectStore.Spec.Gateway.Port = 80 - - object := []runtime.Object{ - objectZone, - objectStore, - objectZoneGroup, - objectRealm, - cephCluster, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return rgwCephAuthGetOrCreateKey, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "realm" && args[1] == "list" { - return 
realmListMultisiteJSON, nil - } - if args[0] == "realm" && args[1] == "get" { - return realmGetMultisiteJSON, nil - } - if args[0] == "zonegroup" && args[1] == "get" { - return zoneGroupGetMultisiteJSON, nil - } - if args[0] == "zone" && args[1] == "get" { - return zoneGetMultisiteJSON, nil - } - if args[0] == "user" && args[1] == "create" { - return userCreateJSON, nil - } - return "", nil - }, - } - - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectZone{}, &cephv1.CephObjectZoneList{}, &cephv1.CephCluster{}, &cephv1.CephClusterList{}, &cephv1.CephObjectStore{}, &cephv1.CephObjectStoreList{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - r := &ReconcileCephObjectStore{ - client: cl, - scheme: s, - context: c, - objectStoreChannels: make(map[string]*objectStoreHealth), - recorder: k8sutil.NewEventReporter(record.NewFakeRecorder(5)), - } - - _, err := r.context.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: store, - Namespace: namespace, - }, - } - - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, objectStore) - assert.NoError(t, err) -} diff --git a/pkg/operator/ceph/object/dependents.go b/pkg/operator/ceph/object/dependents.go deleted file mode 100644 index 379660831..000000000 --- a/pkg/operator/ceph/object/dependents.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/util/dependents" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const bucketDependentType = "buckets in the object store (could be from ObjectBucketClaims or COSI Buckets)" - -// CephObjectStoreDependents returns the buckets which exist in the object store that should block -// deletion. 
-// TODO: need unit tests for this - need to be able to fake the admin ops API (nontrivial) -func CephObjectStoreDependents( - clusterdCtx *clusterd.Context, - clusterInfo *client.ClusterInfo, - store *v1.CephObjectStore, - objCtx *Context, - opsCtx *AdminOpsContext, -) (*dependents.DependentList, error) { - nsName := fmt.Sprintf("%s/%s", store.Namespace, store.Name) - baseErrMsg := fmt.Sprintf("failed to get dependents of CephObjectStore %q", nsName) - - deps := dependents.NewDependentList() - - // NOTE: we should still check for buckets when the RGW connection is external since we have no - // way of knowing if the bucket was created due to an ObjectBucketClaim or COSI Bucket. - err := getBucketDependents(deps, clusterdCtx, clusterInfo, store, objCtx, opsCtx) - if err != nil { - return deps, errors.Wrapf(err, baseErrMsg) - } - - // CephObjectStoreUsers - users, err := clusterdCtx.RookClientset.CephV1().CephObjectStoreUsers(store.Namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return deps, errors.Wrapf(err, "%s. failed to list CephObjectStoreUsers for CephObjectStore %q", baseErrMsg, nsName) - } - for _, user := range users.Items { - if user.Spec.Store == store.Name { - deps.Add("CephObjectStoreUsers", user.Name) - } - logger.Debugf("found CephObjectStoreUser %q that does not depend on CephObjectStore %q", user.Name, nsName) - } - - return deps, nil -} - -// adds bucket dependents to the given dependents list -func getBucketDependents( - deps *dependents.DependentList, - clusterdCtx *clusterd.Context, - clusterInfo *client.ClusterInfo, - store *v1.CephObjectStore, - objCtx *Context, - opsCtx *AdminOpsContext, -) error { - nsName := fmt.Sprintf("%s/%s", store.Namespace, store.Name) - - missingPools, err := missingPools(objCtx) - if err != nil { - return errors.Wrapf(err, "failed to check for object buckets") - } - if len(missingPools) > 0 { - // this may be an external object store that does not have all the necessary pools. - // this may also be a Rook-created store that did not finish deleting all pools before the - // Rook operator restarted. - // in either case, we cannot get a successful connection to RGW(s) to check for buckets, and - // we can assume it is safe for deletion to proceed - logger.Infof("skipping check for bucket dependents of CephObjectStore %q. some pools are missing: %v", nsName, missingPools) - return nil - } - - // buckets (including lib-bucket-provisioner buckets and COSI buckets) - buckets, err := opsCtx.AdminOpsClient.ListBuckets(context.TODO()) - if err != nil { - return errors.Wrapf(err, "failed to list buckets in CephObjectStore %q", nsName) - } - healthCheckBucket := genHealthCheckerBucketName(string(store.UID)) - for _, b := range buckets { - if b == healthCheckBucket { - continue // don't include the health checker bucket as a blocking dependent - } - deps.Add(bucketDependentType, b) - } - - return nil -} diff --git a/pkg/operator/ceph/object/dependents_test.go b/pkg/operator/ceph/object/dependents_test.go deleted file mode 100644 index 5d32dcce0..000000000 --- a/pkg/operator/ceph/object/dependents_test.go +++ /dev/null @@ -1,242 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "testing" - "time" - - "github.com/ceph/go-ceph/rgw/admin" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/util/exec" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - dynamicfake "k8s.io/client-go/dynamic/fake" -) - -func TestCephObjectStoreDependents(t *testing.T) { - scheme := runtime.NewScheme() - assert.NoError(t, cephv1.AddToScheme(scheme)) - ns := "test-ceph-object-store-dependents" - var c *clusterd.Context - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" { - if args[1] == "lspools" { - pools := []*client.CephStoragePoolSummary{} - output, err := json.Marshal(pools) - assert.Nil(t, err) - return string(output), nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - }, - } - - newClusterdCtx := func(executor exec.Executor, objects ...runtime.Object) *clusterd.Context { - return &clusterd.Context{ - DynamicClientset: dynamicfake.NewSimpleDynamicClient(scheme, objects...), - RookClientset: rookclient.NewSimpleClientset(), - Executor: executor, - } - } - - pools := []*client.CephStoragePoolSummary{ - {Name: "my-store.rgw.control"}, - {Name: "my-store.rgw.meta"}, - {Name: "my-store.rgw.log"}, - {Name: "my-store.rgw.buckets.non-ec"}, - {Name: "my-store.rgw.buckets.data"}, - {Name: ".rgw.root"}, - {Name: "my-store.rgw.buckets.index"}, - } - - // Mock HTTP call - mockClient := func(bucket string) *MockClient { - return &MockClient{ - MockDo: func(req *http.Request) (*http.Response, error) { - if req.Method == http.MethodGet && req.URL.Path == "rook-ceph-rgw-my-store.mycluster.svc/admin/bucket" { - return &http.Response{ - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewReader([]byte(bucket))), - }, nil - } - return nil, fmt.Errorf("unexpected request: %q. method %q. 
path %q", req.URL.RawQuery, req.Method, req.URL.Path) - }, - } - } - - clusterInfo := client.AdminClusterInfo(ns) - // Create objectmeta with the given name in our test namespace - meta := func(name string) v1.ObjectMeta { - return v1.ObjectMeta{ - Name: name, - Namespace: ns, - } - } - - store := &cephv1.CephObjectStore{ - ObjectMeta: v1.ObjectMeta{ - Name: "my-store", - Namespace: ns, - }, - TypeMeta: v1.TypeMeta{ - Kind: "CephObjectStore", - }, - Spec: cephv1.ObjectStoreSpec{ - Gateway: cephv1.GatewaySpec{Port: 80}, - }, - } - - t.Run("missing pools so skipping", func(t *testing.T) { - c = newClusterdCtx(executor) - deps, err := CephObjectStoreDependents(c, clusterInfo, store, NewContext(c, clusterInfo, store.Name), &AdminOpsContext{}) - assert.NoError(t, err) - assert.True(t, deps.Empty()) - }) - - t.Run("no objectstore users and no buckets", func(t *testing.T) { - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" { - if args[1] == "lspools" { - output, err := json.Marshal(pools) - assert.Nil(t, err) - return string(output), nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if command == "radosgw-admin" && args[0] == "user" { - return userCreateJSON, nil - } - return "", errors.Errorf("no such command %v %v", command, args) - }, - } - c = newClusterdCtx(executor) - client, err := admin.New("rook-ceph-rgw-my-store.mycluster.svc", "53S6B9S809NUP19IJ2K3", "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR", mockClient(`[]`)) - assert.NoError(t, err) - deps, err := CephObjectStoreDependents(c, clusterInfo, store, NewContext(c, clusterInfo, store.Name), &AdminOpsContext{AdminOpsClient: client}) - assert.NoError(t, err) - assert.True(t, deps.Empty()) - }) - - t.Run("one objectstore users but wrong store and no buckets", func(t *testing.T) { - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" { - if args[1] == "lspools" { - output, err := json.Marshal(pools) - assert.Nil(t, err) - return string(output), nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if command == "radosgw-admin" && args[0] == "user" { - return userCreateJSON, nil - } - return "", errors.Errorf("no such command %v %v", command, args) - }, - } - c = newClusterdCtx(executor, &cephv1.CephObjectStoreUser{ObjectMeta: meta("u1")}) - _, err := c.RookClientset.CephV1().CephObjectStoreUsers(clusterInfo.Namespace).Create(context.TODO(), &cephv1.CephObjectStoreUser{ObjectMeta: meta("u1")}, v1.CreateOptions{}) - assert.NoError(t, err) - client, err := admin.New("rook-ceph-rgw-my-store.mycluster.svc", "53S6B9S809NUP19IJ2K3", "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR", mockClient(`[]`)) - assert.NoError(t, err) - deps, err := CephObjectStoreDependents(c, clusterInfo, store, NewContext(c, clusterInfo, store.Name), &AdminOpsContext{AdminOpsClient: client}) - assert.NoError(t, err) - assert.True(t, deps.Empty()) - }) - - t.Run("one objectstore users and no buckets", func(t *testing.T) { - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) 
(string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" { - if args[1] == "lspools" { - output, err := json.Marshal(pools) - assert.Nil(t, err) - return string(output), nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if command == "radosgw-admin" && args[0] == "user" { - return userCreateJSON, nil - } - return "", errors.Errorf("no such command %v %v", command, args) - }, - } - c = newClusterdCtx(executor, &cephv1.CephObjectStoreUser{ObjectMeta: meta("u1")}) - _, err := c.RookClientset.CephV1().CephObjectStoreUsers(clusterInfo.Namespace).Create(context.TODO(), &cephv1.CephObjectStoreUser{ObjectMeta: meta("u1"), Spec: cephv1.ObjectStoreUserSpec{Store: "my-store"}}, v1.CreateOptions{}) - assert.NoError(t, err) - client, err := admin.New("rook-ceph-rgw-my-store.mycluster.svc", "53S6B9S809NUP19IJ2K3", "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR", mockClient(`[]`)) - assert.NoError(t, err) - deps, err := CephObjectStoreDependents(c, clusterInfo, store, NewContext(c, clusterInfo, store.Name), &AdminOpsContext{AdminOpsClient: client}) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"u1"}, deps.OfPluralKind("CephObjectStoreUsers")) - }) - - t.Run("no objectstore users and buckets", func(t *testing.T) { - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "osd" { - if args[1] == "lspools" { - output, err := json.Marshal(pools) - assert.Nil(t, err) - return string(output), nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if command == "radosgw-admin" && args[0] == "user" { - return userCreateJSON, nil - } - return "", errors.Errorf("no such command %v %v", command, args) - }, - } - c = newClusterdCtx(executor) - client, err := admin.New("rook-ceph-rgw-my-store.mycluster.svc", "53S6B9S809NUP19IJ2K3", "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR", mockClient(`["my-bucket"]`)) - assert.NoError(t, err) - deps, err := CephObjectStoreDependents(c, clusterInfo, store, NewContext(c, clusterInfo, store.Name), &AdminOpsContext{AdminOpsClient: client}) - assert.NoError(t, err) - assert.False(t, deps.Empty()) - assert.ElementsMatch(t, []string{"my-bucket"}, deps.OfPluralKind("buckets in the object store (could be from ObjectBucketClaims or COSI Buckets)"), deps) - }) -} diff --git a/pkg/operator/ceph/object/health.go b/pkg/operator/ceph/object/health.go deleted file mode 100644 index 00f36dea8..000000000 --- a/pkg/operator/ceph/object/health.go +++ /dev/null @@ -1,287 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
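The removed TODO notes that faking the admin ops API is nontrivial; the deleted test does it by handing go-ceph's admin client a stubbed HTTP client so no real RGW is contacted. A self-contained sketch of the same technique, independent of Rook's MockClient helper (the endpoint and keys are placeholders):

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/ceph/go-ceph/rgw/admin"
)

// stubHTTPClient satisfies admin.HTTPClient by returning a canned response
// for every request, so no gateway needs to be running.
type stubHTTPClient struct {
	body string
}

func (s *stubHTTPClient) Do(req *http.Request) (*http.Response, error) {
	return &http.Response{
		StatusCode: http.StatusOK,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte(s.body))),
	}, nil
}

func main() {
	// Endpoint and keys are placeholders; nothing is contacted because of the stub.
	client, err := admin.New("http://rgw.example.svc", "ACCESSKEY", "SECRETKEY", &stubHTTPClient{body: `["my-bucket"]`})
	if err != nil {
		panic(err)
	}
	buckets, err := client.ListBuckets(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println(buckets) // [my-bucket]
}
```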
-*/ - -package object - -import ( - "context" - "fmt" - "time" - - "github.com/ceph/go-ceph/rgw/admin" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - s3UserHealthCheckName = "rook-ceph-internal-s3-user-checker" - s3HealthCheckBucketName = "rook-ceph-bucket-checker" - s3HealthCheckObjectBody = "Test Rook Object Data" - s3HealthCheckObjectKey = "rookHealthCheckTestObject" - contentType = "plain/text" -) - -var ( - defaultHealthCheckInterval = 1 * time.Minute -) - -// bucketChecker aggregates the mon/cluster info needed to check the health of the monitors -type bucketChecker struct { - context *clusterd.Context - objContext *AdminOpsContext - interval *time.Duration - port int32 - client client.Client - namespacedName types.NamespacedName - objectStoreSpec *cephv1.ObjectStoreSpec -} - -// newbucketChecker creates a new HealthChecker object -func newBucketChecker( - ctx *clusterd.Context, objContext *Context, client client.Client, namespacedName types.NamespacedName, objectStoreSpec *cephv1.ObjectStoreSpec, -) (*bucketChecker, error) { - port, err := objectStoreSpec.GetPort() - if err != nil { - return nil, errors.Wrapf(err, "failed to create bucket checker for CephObjectStore %q", namespacedName.String()) - } - - opsCtx, err := NewMultisiteAdminOpsContext(objContext, objectStoreSpec) - if err != nil { - return nil, errors.Wrapf(err, "failed to create bucket checker for CephObjectStore %q", namespacedName.String()) - } - - c := &bucketChecker{ - context: ctx, - objContext: opsCtx, - interval: &defaultHealthCheckInterval, - port: port, - namespacedName: namespacedName, - client: client, - objectStoreSpec: objectStoreSpec, - } - - // allow overriding the check interval - checkInterval := objectStoreSpec.HealthCheck.Bucket.Interval - if checkInterval != nil { - logger.Infof("ceph rgw status check interval for object store %q is %q", namespacedName.Name, checkInterval.Duration.String()) - c.interval = &checkInterval.Duration - } - - return c, nil -} - -// checkObjectStore periodically checks the health of the cluster -func (c *bucketChecker) checkObjectStore(stopCh chan struct{}) { - // check the object store health immediately before starting the loop - err := c.checkObjectStoreHealth() - if err != nil { - updateStatusBucket(c.client, c.namespacedName, cephv1.ConditionFailure, err.Error()) - logger.Debugf("failed to check rgw health for object store %q. %v", c.namespacedName.Name, err) - } - - for { - select { - case <-stopCh: - // purge bucket and s3 user - // Needed for external mode where in converged everything goes away with the CR deletion - c.cleanupHealthCheck() - logger.Infof("stopping monitoring of rgw endpoints for object store %q", c.namespacedName.Name) - return - - case <-time.After(*c.interval): - logger.Debugf("checking rgw health of object store %q", c.namespacedName.Name) - err := c.checkObjectStoreHealth() - if err != nil { - updateStatusBucket(c.client, c.namespacedName, cephv1.ConditionFailure, err.Error()) - logger.Debugf("failed to check rgw health for object store %q. %v", c.namespacedName.Name, err) - } - } - } -} - -func (c *bucketChecker) checkObjectStoreHealth() error { - /* - 0. purge the s3 object by default - 1. create an S3 user - 2. always use the same user - 3. if already exists just re-hydrate the s3 credentials - 4. 
create a bucket with that user or use the existing one (always use the same bucket) - 5. create a check file - 6. get the hash of the file - 7. PUT the file - 8. GET the file - 9. compare hashes - 10. delete object on bucket - 11. update CR health status check - - Always keep the bucket and the user for the health check, just do PUT and GET because bucket creation is expensive - */ - - // Keep admin ops context up-to date if there are config changes - if err := UpdateEndpoint(&c.objContext.Context, c.objectStoreSpec); err != nil { - return errors.Wrapf(err, "failed to parse updated CephObjectStore spec") - } - - // Generate unique user and bucket name - bucketName := genHealthCheckerBucketName(c.objContext.UID) - userConfig := genUserCheckerConfig(c.objContext.UID) - - // Create checker user - logger.Debugf("creating s3 user object %q for object store %q health check", userConfig.ID, c.namespacedName.Name) - var user admin.User - user, err := c.objContext.AdminOpsClient.GetUser(context.TODO(), userConfig) - if err != nil { - if errors.Is(err, admin.ErrNoSuchUser) { - user, err = c.objContext.AdminOpsClient.CreateUser(context.TODO(), userConfig) - if err != nil { - return errors.Wrapf(err, "failed to create from ceph object user %v", userConfig.ID) - } - } else { - return errors.Wrapf(err, "failed to get details from ceph object user %q", userConfig.ID) - } - } - - // Set access and secret key - tlsCert := c.objContext.TlsCert - s3endpoint := c.objContext.Endpoint - s3AccessKey := user.Keys[0].AccessKey - s3SecretKey := user.Keys[0].SecretKey - - // Initiate s3 agent - logger.Debugf("initializing s3 connection for object store %q", c.namespacedName.Name) - s3client, err := NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, false, tlsCert) - if err != nil { - return errors.Wrap(err, "failed to initialize s3 connection") - } - - // Force purge the s3 object before starting anything - cleanupObjectHealthCheck(s3client, c.objContext.UID) - - // Bucket health test - err = c.testBucketHealth(s3client, bucketName) - if err != nil { - return errors.Wrapf(err, "failed to run bucket health checks for object store %q", c.namespacedName.String()) - } - - logger.Debugf("successfully checked object store endpoint for object store %q", c.namespacedName.String()) - - // Update the EndpointStatus in the CR to reflect the healthyness - updateStatusBucket(c.client, c.namespacedName, cephv1.ConditionConnected, "") - - return nil -} - -func cleanupObjectHealthCheck(s3client *S3Agent, objectStoreUID string) { - bucketToDelete := genHealthCheckerBucketName(objectStoreUID) - logger.Debugf("deleting object %q from bucket %q", s3HealthCheckObjectKey, bucketToDelete) - _, err := s3client.DeleteObjectInBucket(bucketToDelete, s3HealthCheckObjectKey) - if err != nil { - logger.Errorf("failed to delete object in bucket. %v", err) - } -} - -func (c *bucketChecker) cleanupHealthCheck() { - bucketToDelete := genHealthCheckerBucketName(c.objContext.UID) - logger.Infof("deleting object %q from bucket %q in object store %q", s3HealthCheckObjectKey, bucketToDelete, c.namespacedName.Name) - - thePurge := true - err := c.objContext.AdminOpsClient.RemoveBucket(context.TODO(), admin.Bucket{Bucket: bucketToDelete, PurgeObject: &thePurge}) - if err != nil { - if errors.Is(err, admin.ErrNoSuchBucket) { - // opinion: "not found" is not an error - logger.Debugf("bucket %q does not exist", bucketToDelete) - } else { - logger.Errorf("failed to delete bucket %q for object store %q. 
%v", bucketToDelete, c.namespacedName.Name, err) - } - } - - userToDelete := genUserCheckerConfig(c.objContext.UID) - err = c.objContext.AdminOpsClient.RemoveUser(context.TODO(), userToDelete) - if err != nil && !errors.Is(err, admin.ErrNoSuchUser) { - logger.Errorf("failed to delete object user %q for object store %q. %v", userToDelete.ID, c.namespacedName.Name, err) - } - - logger.Debugf("successfully deleted object user %q for object store %q", userToDelete.ID, c.namespacedName.Name) -} - -func toCustomResourceStatus(currentStatus *cephv1.BucketStatus, details string, health cephv1.ConditionType) *cephv1.BucketStatus { - s := &cephv1.BucketStatus{ - Health: health, - LastChecked: time.Now().UTC().Format(time.RFC3339), - Details: details, - } - - if currentStatus != nil { - s.LastChanged = currentStatus.LastChanged - if currentStatus.Details != s.Details { - s.LastChanged = s.LastChecked - } - } - return s -} - -func genHealthCheckerBucketName(uuid string) string { - return fmt.Sprintf("%s-%s", s3HealthCheckBucketName, uuid) -} - -func genUserCheckerConfig(cephObjectStoreUID string) admin.User { - userName := fmt.Sprintf("%s-%s", s3UserHealthCheckName, cephObjectStoreUID) - - return admin.User{ - ID: userName, - DisplayName: userName, - } -} - -func (c *bucketChecker) testBucketHealth(s3client *S3Agent, bucket string) error { - // Purge on exit - defer cleanupObjectHealthCheck(s3client, c.objContext.UID) - - // Create S3 bucket - logger.Debugf("creating bucket %q", bucket) - err := s3client.CreateBucketNoInfoLogging(bucket) - if err != nil { - return errors.Wrapf(err, "failed to create bucket %q for object store %q", bucket, c.namespacedName.Name) - } - - // Put an object into the bucket - logger.Debugf("putting object %q in bucket %q for object store %q", s3HealthCheckObjectKey, bucket, c.namespacedName.Name) - _, err = s3client.PutObjectInBucket(bucket, string(s3HealthCheckObjectBody), s3HealthCheckObjectKey, contentType) - if err != nil { - return errors.Wrapf(err, "failed to put object %q in bucket %q for object store %q", s3HealthCheckObjectKey, bucket, c.namespacedName.Name) - } - - // Get the object from the bucket - logger.Debugf("getting object %q in bucket %q for object store %q", s3HealthCheckObjectKey, bucket, c.namespacedName.Name) - read, err := s3client.GetObjectInBucket(bucket, s3HealthCheckObjectKey) - if err != nil { - return errors.Wrapf(err, "failed to get object %q in bucket %q for object store %q", s3HealthCheckObjectKey, bucket, c.namespacedName.Name) - } - - // Compare the old and the existing object - logger.Debugf("comparing objects hash for object store %q", c.namespacedName.Name) - oldHash := k8sutil.Hash(s3HealthCheckObjectBody) - currentHash := k8sutil.Hash(read) - if currentHash != oldHash { - return errors.Wrapf(err, "wrong file content, old file hash is %q and new one is %q for object store %q", oldHash, currentHash, c.namespacedName.Name) - } - - return nil -} diff --git a/pkg/operator/ceph/object/mime.go b/pkg/operator/ceph/object/mime.go deleted file mode 100644 index 7a47d5be6..000000000 --- a/pkg/operator/ceph/object/mime.go +++ /dev/null @@ -1,891 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "fmt" - "path" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" -) - -const ( - // these control the location where the mime.types file is mounted in containers. - // Ceph's default is /etc/mime.types, but we can't mount a configmap to `/etc` without breaking - // the container. /etc/ceph/rgw won't overwrite ceph.conf (if it exists), and it will be easy to - // find manually by inspection of the `/etc/ceph` dir. - mimeTypesMountDir = "/etc/ceph/rgw" - mimeTypesFileName = "mime.types" -) - -func (c *clusterConfig) mimeTypesConfigMapName() string { - return fmt.Sprintf("%s-mime-types", instanceName(c.store.Name)) -} - -func mimeTypesMountPath() string { - return path.Join(mimeTypesMountDir, mimeTypesFileName) -} - -// store mime.types file in a config map -func (c *clusterConfig) generateMimeTypes() error { - k := k8sutil.NewConfigMapKVStore(c.store.Namespace, c.context.Clientset, c.ownerInfo) - if _, err := k.GetValue(c.mimeTypesConfigMapName(), mimeTypesFileName); err == nil || !kerrors.IsNotFound(err) { - logger.Infof("config map %q for object store %q already exists, not overwriting", c.mimeTypesConfigMapName(), c.store.Name) - return nil - } - // is not found - if err := k.SetValue(c.mimeTypesConfigMapName(), mimeTypesFileName, mimeTypes); err != nil { - return errors.Wrapf(err, "failed to create config map for object store %q", c.store.Name) - } - return nil -} - -func (c *clusterConfig) mimeTypesVolume() v1.Volume { - return v1.Volume{ - Name: c.mimeTypesConfigMapName(), - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{ - Name: c.mimeTypesConfigMapName(), - }}}} -} - -func (c *clusterConfig) mimeTypesVolumeMount() v1.VolumeMount { - return v1.VolumeMount{ - Name: c.mimeTypesConfigMapName(), - MountPath: mimeTypesMountDir, - ReadOnly: true, - // read-only so malicious actors can't edit, which could allow them to add file type - // execution vulnerabilities that an admin has taken care to eliminate - } -} - -const mimeTypes = ` -application/activemessage -application/andrew-inset ez -application/annodex anx -application/applefile -application/atom+xml atom -application/atomcat+xml atomcat -application/atomicmail -application/atomserv+xml atomsrv -application/batch-SMTP -application/bbolin lin -application/beep+xml -application/cals-1840 -application/commonground -application/cu-seeme cu -application/cybercash -application/davmount+xml davmount -application/dca-rft -application/dec-dx -application/dicom dcm -application/docbook+xml -application/dsptype tsp -application/dvcs -application/ecmascript es -application/edi-consent -application/edi-x12 -application/edifact -application/eshop -application/font-sfnt otf ttf -application/font-tdpfr pfr -application/font-woff woff -application/futuresplash spl -application/ghostview -application/gzip gz -application/hta hta -application/http -application/hyperstudio -application/iges -application/index -application/index.cmd -application/index.obj 
-application/index.response -application/index.vnd -application/iotp -application/ipp -application/isup -application/java-archive jar -application/java-serialized-object ser -application/java-vm class -application/javascript js -application/json json -application/m3g m3g -application/mac-binhex40 hqx -application/mac-compactpro cpt -application/macwriteii -application/marc -application/mathematica nb nbp -application/mbox mbox -application/ms-tnef -application/msaccess mdb -application/msword doc dot -application/mxf mxf -application/news-message-id -application/news-transmission -application/ocsp-request -application/ocsp-response -application/octet-stream bin deploy msu msp -application/oda oda -application/oebps-package+xml opf -application/ogg ogx -application/onenote one onetoc2 onetmp onepkg -application/parityfec -application/pdf pdf -application/pgp-encrypted pgp -application/pgp-keys key -application/pgp-signature sig -application/pics-rules prf -application/pkcs10 -application/pkcs7-mime -application/pkcs7-signature -application/pkix-cert -application/pkix-crl -application/pkixcmp -application/postscript ps ai eps epsi epsf eps2 eps3 -application/prs.alvestrand.titrax-sheet -application/prs.cww -application/prs.nprend -application/qsig -application/rar rar -application/rdf+xml rdf -application/remote-printing -application/riscos -application/rtf rtf -application/sdp -application/set-payment -application/set-payment-initiation -application/set-registration -application/set-registration-initiation -application/sgml -application/sgml-open-catalog -application/sieve -application/sla stl -application/slate -application/smil+xml smi smil -application/timestamp-query -application/timestamp-reply -application/vemmi -application/whoispp-query -application/whoispp-response -application/wita -application/x400-bp -application/xhtml+xml xhtml xht -application/xml xml xsd -application/xml-dtd -application/xml-external-parsed-entity -application/xslt+xml xsl xslt -application/xspf+xml xspf -application/zip zip -application/vnd.3M.Post-it-Notes -application/vnd.accpac.simply.aso -application/vnd.accpac.simply.imp -application/vnd.acucobol -application/vnd.aether.imp -application/vnd.android.package-archive apk -application/vnd.anser-web-certificate-issue-initiation -application/vnd.anser-web-funds-transfer-initiation -application/vnd.audiograph -application/vnd.bmi -application/vnd.businessobjects -application/vnd.canon-cpdl -application/vnd.canon-lips -application/vnd.cinderella cdy -application/vnd.claymore -application/vnd.commerce-battelle -application/vnd.commonspace -application/vnd.comsocaller -application/vnd.contact.cmsg -application/vnd.cosmocaller -application/vnd.ctc-posml -application/vnd.cups-postscript -application/vnd.cups-raster -application/vnd.cups-raw -application/vnd.cybank -application/vnd.debian.binary-package deb ddeb udeb -application/vnd.dna -application/vnd.dpgraph -application/vnd.dxr -application/vnd.ecdis-update -application/vnd.ecowin.chart -application/vnd.ecowin.filerequest -application/vnd.ecowin.fileupdate -application/vnd.ecowin.series -application/vnd.ecowin.seriesrequest -application/vnd.ecowin.seriesupdate -application/vnd.enliven -application/vnd.epson.esf -application/vnd.epson.msf -application/vnd.epson.quickanime -application/vnd.epson.salt -application/vnd.epson.ssf -application/vnd.ericsson.quickcall -application/vnd.eudora.data -application/vnd.fdf -application/vnd.ffsns -application/vnd.flographit -application/vnd.font-fontforge-sfd sfd 
-application/vnd.framemaker -application/vnd.fsc.weblaunch -application/vnd.fujitsu.oasys -application/vnd.fujitsu.oasys2 -application/vnd.fujitsu.oasys3 -application/vnd.fujitsu.oasysgp -application/vnd.fujitsu.oasysprs -application/vnd.fujixerox.ddd -application/vnd.fujixerox.docuworks -application/vnd.fujixerox.docuworks.binder -application/vnd.fut-misnet -application/vnd.google-earth.kml+xml kml -application/vnd.google-earth.kmz kmz -application/vnd.grafeq -application/vnd.groove-account -application/vnd.groove-identity-message -application/vnd.groove-injector -application/vnd.groove-tool-message -application/vnd.groove-tool-template -application/vnd.groove-vcard -application/vnd.hhe.lesson-player -application/vnd.hp-HPGL -application/vnd.hp-PCL -application/vnd.hp-PCLXL -application/vnd.hp-hpid -application/vnd.hp-hps -application/vnd.httphone -application/vnd.hzn-3d-crossword -application/vnd.ibm.MiniPay -application/vnd.ibm.afplinedata -application/vnd.ibm.modcap -application/vnd.informix-visionary -application/vnd.intercon.formnet -application/vnd.intertrust.digibox -application/vnd.intertrust.nncp -application/vnd.intu.qbo -application/vnd.intu.qfx -application/vnd.irepository.package+xml -application/vnd.is-xpr -application/vnd.japannet-directory-service -application/vnd.japannet-jpnstore-wakeup -application/vnd.japannet-payment-wakeup -application/vnd.japannet-registration -application/vnd.japannet-registration-wakeup -application/vnd.japannet-setstore-wakeup -application/vnd.japannet-verification -application/vnd.japannet-verification-wakeup -application/vnd.koan -application/vnd.lotus-1-2-3 -application/vnd.lotus-approach -application/vnd.lotus-freelance -application/vnd.lotus-notes -application/vnd.lotus-organizer -application/vnd.lotus-screencam -application/vnd.lotus-wordpro -application/vnd.mcd -application/vnd.mediastation.cdkey -application/vnd.meridian-slingshot -application/vnd.mif -application/vnd.minisoft-hp3000-save -application/vnd.mitsubishi.misty-guard.trustweb -application/vnd.mobius.daf -application/vnd.mobius.dis -application/vnd.mobius.msl -application/vnd.mobius.plc -application/vnd.mobius.txf -application/vnd.motorola.flexsuite -application/vnd.motorola.flexsuite.adsi -application/vnd.motorola.flexsuite.fis -application/vnd.motorola.flexsuite.gotap -application/vnd.motorola.flexsuite.kmr -application/vnd.motorola.flexsuite.ttc -application/vnd.motorola.flexsuite.wem -application/vnd.mozilla.xul+xml xul -application/vnd.ms-artgalry -application/vnd.ms-asf -application/vnd.ms-excel xls xlb xlt -application/vnd.ms-excel.addin.macroEnabled.12 xlam -application/vnd.ms-excel.sheet.binary.macroEnabled.12 xlsb -application/vnd.ms-excel.sheet.macroEnabled.12 xlsm -application/vnd.ms-excel.template.macroEnabled.12 xltm -application/vnd.ms-fontobject eot -application/vnd.ms-lrm -application/vnd.ms-officetheme thmx -application/vnd.ms-pki.seccat cat -#application/vnd.ms-pki.stl stl -application/vnd.ms-powerpoint ppt pps -application/vnd.ms-powerpoint.addin.macroEnabled.12 ppam -application/vnd.ms-powerpoint.presentation.macroEnabled.12 pptm -application/vnd.ms-powerpoint.slide.macroEnabled.12 sldm -application/vnd.ms-powerpoint.slideshow.macroEnabled.12 ppsm -application/vnd.ms-powerpoint.template.macroEnabled.12 potm -application/vnd.ms-project -application/vnd.ms-tnef -application/vnd.ms-word.document.macroEnabled.12 docm -application/vnd.ms-word.template.macroEnabled.12 dotm -application/vnd.ms-works -application/vnd.mseq -application/vnd.msign 
-application/vnd.music-niff -application/vnd.musician -application/vnd.netfpx -application/vnd.noblenet-directory -application/vnd.noblenet-sealer -application/vnd.noblenet-web -application/vnd.novadigm.EDM -application/vnd.novadigm.EDX -application/vnd.novadigm.EXT -application/vnd.oasis.opendocument.chart odc -application/vnd.oasis.opendocument.database odb -application/vnd.oasis.opendocument.formula odf -application/vnd.oasis.opendocument.graphics odg -application/vnd.oasis.opendocument.graphics-template otg -application/vnd.oasis.opendocument.image odi -application/vnd.oasis.opendocument.presentation odp -application/vnd.oasis.opendocument.presentation-template otp -application/vnd.oasis.opendocument.spreadsheet ods -application/vnd.oasis.opendocument.spreadsheet-template ots -application/vnd.oasis.opendocument.text odt -application/vnd.oasis.opendocument.text-master odm -application/vnd.oasis.opendocument.text-template ott -application/vnd.oasis.opendocument.text-web oth -application/vnd.openxmlformats-officedocument.presentationml.presentation pptx -application/vnd.openxmlformats-officedocument.presentationml.slide sldx -application/vnd.openxmlformats-officedocument.presentationml.slideshow ppsx -application/vnd.openxmlformats-officedocument.presentationml.template potx -application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx -application/vnd.openxmlformats-officedocument.spreadsheetml.template xltx -application/vnd.openxmlformats-officedocument.wordprocessingml.document docx -application/vnd.openxmlformats-officedocument.wordprocessingml.template dotx -application/vnd.osa.netdeploy -application/vnd.palm -application/vnd.pg.format -application/vnd.pg.osasli -application/vnd.powerbuilder6 -application/vnd.powerbuilder6-s -application/vnd.powerbuilder7 -application/vnd.powerbuilder7-s -application/vnd.powerbuilder75 -application/vnd.powerbuilder75-s -application/vnd.previewsystems.box -application/vnd.publishare-delta-tree -application/vnd.pvi.ptid1 -application/vnd.pwg-xhtml-print+xml -application/vnd.rapid -application/vnd.rim.cod cod -application/vnd.s3sms -application/vnd.seemail -application/vnd.shana.informed.formdata -application/vnd.shana.informed.formtemplate -application/vnd.shana.informed.interchange -application/vnd.shana.informed.package -application/vnd.smaf mmf -application/vnd.sss-cod -application/vnd.sss-dtf -application/vnd.sss-ntf -application/vnd.stardivision.calc sdc -application/vnd.stardivision.chart sds -application/vnd.stardivision.draw sda -application/vnd.stardivision.impress sdd -application/vnd.stardivision.math sdf -application/vnd.stardivision.writer sdw -application/vnd.stardivision.writer-global sgl -application/vnd.street-stream -application/vnd.sun.xml.calc sxc -application/vnd.sun.xml.calc.template stc -application/vnd.sun.xml.draw sxd -application/vnd.sun.xml.draw.template std -application/vnd.sun.xml.impress sxi -application/vnd.sun.xml.impress.template sti -application/vnd.sun.xml.math sxm -application/vnd.sun.xml.writer sxw -application/vnd.sun.xml.writer.global sxg -application/vnd.sun.xml.writer.template stw -application/vnd.svd -application/vnd.swiftview-ics -application/vnd.symbian.install sis -application/vnd.tcpdump.pcap cap pcap -application/vnd.triscape.mxs -application/vnd.trueapp -application/vnd.truedoc -application/vnd.tve-trigger -application/vnd.ufdl -application/vnd.uplanet.alert -application/vnd.uplanet.alert-wbxml -application/vnd.uplanet.bearer-choice -application/vnd.uplanet.bearer-choice-wbxml 
-application/vnd.uplanet.cacheop -application/vnd.uplanet.cacheop-wbxml -application/vnd.uplanet.channel -application/vnd.uplanet.channel-wbxml -application/vnd.uplanet.list -application/vnd.uplanet.list-wbxml -application/vnd.uplanet.listcmd -application/vnd.uplanet.listcmd-wbxml -application/vnd.uplanet.signal -application/vnd.vcx -application/vnd.vectorworks -application/vnd.vidsoft.vidconference -application/vnd.visio vsd vst vsw vss -application/vnd.vividence.scriptfile -application/vnd.wap.sic -application/vnd.wap.slc -application/vnd.wap.wbxml wbxml -application/vnd.wap.wmlc wmlc -application/vnd.wap.wmlscriptc wmlsc -application/vnd.webturbo -application/vnd.wordperfect wpd -application/vnd.wordperfect5.1 wp5 -application/vnd.wrq-hp3000-labelled -application/vnd.wt.stf -application/vnd.xara -application/vnd.xfdl -application/vnd.yellowriver-custom-menu -application/zlib -application/x-123 wk -application/x-7z-compressed 7z -application/x-abiword abw -application/x-apple-diskimage dmg -application/x-bcpio bcpio -application/x-bittorrent torrent -application/x-cab cab -application/x-cbr cbr -application/x-cbz cbz -application/x-cdf cdf cda -application/x-cdlink vcd -application/x-chess-pgn pgn -application/x-comsol mph -application/x-core -application/x-cpio cpio -application/x-csh csh -application/x-debian-package deb udeb -application/x-director dcr dir dxr -application/x-dms dms -application/x-doom wad -application/x-dvi dvi -application/x-executable -application/x-font pfa pfb gsf -application/x-font-pcf pcf pcf.Z -application/x-freemind mm -application/x-futuresplash spl -application/x-ganttproject gan -application/x-gnumeric gnumeric -application/x-go-sgf sgf -application/x-graphing-calculator gcf -application/x-gtar gtar -application/x-gtar-compressed tgz taz -application/x-hdf hdf -#application/x-httpd-eruby rhtml -#application/x-httpd-php phtml pht php -#application/x-httpd-php-source phps -#application/x-httpd-php3 php3 -#application/x-httpd-php3-preprocessed php3p -#application/x-httpd-php4 php4 -#application/x-httpd-php5 php5 -application/x-hwp hwp -application/x-ica ica -application/x-info info -application/x-internet-signup ins isp -application/x-iphone iii -application/x-iso9660-image iso -application/x-jam jam -application/x-java-applet -application/x-java-bean -application/x-java-jnlp-file jnlp -application/x-jmol jmz -application/x-kchart chrt -application/x-kdelnk -application/x-killustrator kil -application/x-koan skp skd skt skm -application/x-kpresenter kpr kpt -application/x-kspread ksp -application/x-kword kwd kwt -application/x-latex latex -application/x-lha lha -application/x-lyx lyx -application/x-lzh lzh -application/x-lzx lzx -application/x-maker frm maker frame fm fb book fbdoc -application/x-mif mif -application/x-mpegURL m3u8 -application/x-ms-application application -application/x-ms-manifest manifest -application/x-ms-wmd wmd -application/x-ms-wmz wmz -application/x-msdos-program com exe bat dll -application/x-msi msi -application/x-netcdf nc -application/x-ns-proxy-autoconfig pac -application/x-nwc nwc -application/x-object o -application/x-oz-application oza -application/x-pkcs7-certreqresp p7r -application/x-pkcs7-crl crl -application/x-python-code pyc pyo -application/x-qgis qgs shp shx -application/x-quicktimeplayer qtl -application/x-rdp rdp -application/x-redhat-package-manager rpm -application/x-rss+xml rss -application/x-ruby rb -application/x-rx -application/x-scilab sci sce -application/x-scilab-xcos xcos -application/x-sh sh 
-application/x-shar shar -application/x-shellscript -application/x-shockwave-flash swf swfl -application/x-silverlight scr -application/x-sql sql -application/x-stuffit sit sitx -application/x-sv4cpio sv4cpio -application/x-sv4crc sv4crc -application/x-tar tar -application/x-tcl tcl -application/x-tex-gf gf -application/x-tex-pk pk -application/x-texinfo texinfo texi -application/x-trash ~ % bak old sik -application/x-troff t tr roff -application/x-troff-man man -application/x-troff-me me -application/x-troff-ms ms -application/x-ustar ustar -application/x-videolan -application/x-wais-source src -application/x-wingz wz -application/x-x509-ca-cert crt -application/x-xcf xcf -application/x-xfig fig -application/x-xpinstall xpi -application/x-xz xz - -audio/32kadpcm -audio/3gpp -audio/amr amr -audio/amr-wb awb -audio/annodex axa -audio/basic au snd -audio/csound csd orc sco -audio/flac flac -audio/g.722.1 -audio/l16 -audio/midi mid midi kar -audio/mp4a-latm -audio/mpa-robust -audio/mpeg mpga mpega mp2 mp3 m4a -audio/mpegurl m3u -audio/ogg oga ogg opus spx -audio/parityfec -audio/prs.sid sid -audio/telephone-event -audio/tone -audio/vnd.cisco.nse -audio/vnd.cns.anp1 -audio/vnd.cns.inf1 -audio/vnd.digital-winds -audio/vnd.everad.plj -audio/vnd.lucent.voice -audio/vnd.nortel.vbk -audio/vnd.nuera.ecelp4800 -audio/vnd.nuera.ecelp7470 -audio/vnd.nuera.ecelp9600 -audio/vnd.octel.sbc -audio/vnd.qcelp -audio/vnd.rhetorex.32kadpcm -audio/vnd.vmx.cvsd -audio/x-aiff aif aiff aifc -audio/x-gsm gsm -audio/x-mpegurl m3u -audio/x-ms-wma wma -audio/x-ms-wax wax -audio/x-pn-realaudio-plugin -audio/x-pn-realaudio ra rm ram -audio/x-realaudio ra -audio/x-scpls pls -audio/x-sd2 sd2 -audio/x-wav wav - -chemical/x-alchemy alc -chemical/x-cache cac cache -chemical/x-cache-csf csf -chemical/x-cactvs-binary cbin cascii ctab -chemical/x-cdx cdx -chemical/x-cerius cer -chemical/x-chem3d c3d -chemical/x-chemdraw chm -chemical/x-cif cif -chemical/x-cmdf cmdf -chemical/x-cml cml -chemical/x-compass cpa -chemical/x-crossfire bsd -chemical/x-csml csml csm -chemical/x-ctx ctx -chemical/x-cxf cxf cef -#chemical/x-daylight-smiles smi -chemical/x-embl-dl-nucleotide emb embl -chemical/x-galactic-spc spc -chemical/x-gamess-input inp gam gamin -chemical/x-gaussian-checkpoint fch fchk -chemical/x-gaussian-cube cub -chemical/x-gaussian-input gau gjc gjf -chemical/x-gaussian-log gal -chemical/x-gcg8-sequence gcg -chemical/x-genbank gen -chemical/x-hin hin -chemical/x-isostar istr ist -chemical/x-jcamp-dx jdx dx -chemical/x-kinemage kin -chemical/x-macmolecule mcm -chemical/x-macromodel-input mmd mmod -chemical/x-mdl-molfile mol -chemical/x-mdl-rdfile rd -chemical/x-mdl-rxnfile rxn -chemical/x-mdl-sdfile sd sdf -chemical/x-mdl-tgf tgf -#chemical/x-mif mif -chemical/x-mmcif mcif -chemical/x-mol2 mol2 -chemical/x-molconn-Z b -chemical/x-mopac-graph gpt -chemical/x-mopac-input mop mopcrt mpc zmt -chemical/x-mopac-out moo -chemical/x-mopac-vib mvb -chemical/x-ncbi-asn1 asn -chemical/x-ncbi-asn1-ascii prt ent -chemical/x-ncbi-asn1-binary val aso -chemical/x-ncbi-asn1-spec asn -chemical/x-pdb pdb ent -chemical/x-rosdal ros -chemical/x-swissprot sw -chemical/x-vamas-iso14976 vms -chemical/x-vmd vmd -chemical/x-xtel xtel -chemical/x-xyz xyz - -image/cgm -image/g3fax -image/gif gif -image/ief ief -image/jp2 jp2 jpg2 -image/jpeg jpeg jpg jpe -image/jpm jpm -image/jpx jpx jpf -image/naplps -image/pcx pcx -image/png png -image/prs.btif -image/prs.pti -image/svg+xml svg svgz -image/tiff tiff tif -image/vnd.cns.inf2 -image/vnd.djvu djvu djv 
-image/vnd.dwg -image/vnd.dxf -image/vnd.fastbidsheet -image/vnd.fpx -image/vnd.fst -image/vnd.fujixerox.edmics-mmr -image/vnd.fujixerox.edmics-rlc -image/vnd.microsoft.icon ico -image/vnd.mix -image/vnd.net-fpx -image/vnd.svf -image/vnd.wap.wbmp wbmp -image/vnd.xiff -image/x-canon-cr2 cr2 -image/x-canon-crw crw -image/x-cmu-raster ras -image/x-coreldraw cdr -image/x-coreldrawpattern pat -image/x-coreldrawtemplate cdt -image/x-corelphotopaint cpt -image/x-epson-erf erf -image/x-icon -image/x-jg art -image/x-jng jng -image/x-ms-bmp bmp -image/x-nikon-nef nef -image/x-olympus-orf orf -image/x-photoshop psd -image/x-portable-anymap pnm -image/x-portable-bitmap pbm -image/x-portable-graymap pgm -image/x-portable-pixmap ppm -image/x-rgb rgb -image/x-xbitmap xbm -image/x-xpixmap xpm -image/x-xwindowdump xwd - -inode/chardevice -inode/blockdevice -inode/directory-locked -inode/directory -inode/fifo -inode/socket - -message/delivery-status -message/disposition-notification -message/external-body -message/http -message/s-http -message/news -message/partial -message/rfc822 eml - -model/iges igs iges -model/mesh msh mesh silo -model/vnd.dwf -model/vnd.flatland.3dml -model/vnd.gdl -model/vnd.gs-gdl -model/vnd.gtw -model/vnd.mts -model/vnd.vtu -model/vrml wrl vrml -model/x3d+vrml x3dv -model/x3d+xml x3d -model/x3d+binary x3db - -multipart/alternative -multipart/appledouble -multipart/byteranges -multipart/digest -multipart/encrypted -multipart/form-data -multipart/header-set -multipart/mixed -multipart/parallel -multipart/related -multipart/report -multipart/signed -multipart/voice-message - -text/cache-manifest appcache -text/calendar ics icz -text/css css -text/csv csv -text/directory -text/english -text/enriched -text/h323 323 -text/html html htm shtml -text/iuls uls -text/mathml mml -text/markdown md markdown -text/parityfec -text/plain asc txt text pot brf srt -text/prs.lines.tag -text/rfc822-headers -text/richtext rtx -text/rtf -text/scriptlet sct wsc -text/t140 -text/texmacs tm -text/tab-separated-values tsv -text/turtle ttl -text/uri-list -text/vcard vcf vcard -text/vnd.abc -text/vnd.curl -text/vnd.debian.copyright -text/vnd.DMClientScript -text/vnd.flatland.3dml -text/vnd.fly -text/vnd.fmi.flexstor -text/vnd.in3d.3dml -text/vnd.in3d.spot -text/vnd.IPTC.NewsML -text/vnd.IPTC.NITF -text/vnd.latex-z -text/vnd.motorola.reflex -text/vnd.ms-mediapackage -text/vnd.sun.j2me.app-descriptor jad -text/vnd.wap.si -text/vnd.wap.sl -text/vnd.wap.wml wml -text/vnd.wap.wmlscript wmls -text/x-bibtex bib -text/x-boo boo -text/x-c++hdr h++ hpp hxx hh -text/x-c++src c++ cpp cxx cc -text/x-chdr h -text/x-component htc -text/x-crontab -text/x-csh csh -text/x-csrc c -text/x-dsrc d -text/x-diff diff patch -text/x-haskell hs -text/x-java java -text/x-lilypond ly -text/x-literate-haskell lhs -text/x-makefile -text/x-moc moc -text/x-pascal p pas -text/x-pcs-gcd gcd -text/x-perl pl pm -text/x-python py -text/x-scala scala -text/x-server-parsed-html -text/x-setext etx -text/x-sfv sfv -text/x-sh sh -text/x-tcl tcl tk -text/x-tex tex ltx sty cls -text/x-vcalendar vcs - -video/3gpp 3gp -video/annodex axv -video/dl dl -video/dv dif dv -video/fli fli -video/gl gl -video/mpeg mpeg mpg mpe -video/MP2T ts -video/mp4 mp4 -video/quicktime qt mov -video/mp4v-es -video/ogg ogv -video/parityfec -video/pointer -video/webm webm -video/vnd.fvt -video/vnd.motorola.video -video/vnd.motorola.videop -video/vnd.mpegurl mxu -video/vnd.mts -video/vnd.nokia.interleaved-multimedia -video/vnd.vivo -video/x-flv flv -video/x-la-asf lsf lsx 
-video/x-mng mng -video/x-ms-asf asf asx -video/x-ms-wm wm -video/x-ms-wmv wmv -video/x-ms-wmx wmx -video/x-ms-wvx wvx -video/x-msvideo avi -video/x-sgi-movie movie -video/x-matroska mpv mkv - -x-conference/x-cooltalk ice - -x-epoc/x-sisx-app sisx -x-world/x-vrml vrm vrml wrl -` diff --git a/pkg/operator/ceph/object/objectstore.go b/pkg/operator/ceph/object/objectstore.go deleted file mode 100644 index 351b2ef85..000000000 --- a/pkg/operator/ceph/object/objectstore.go +++ /dev/null @@ -1,967 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - "encoding/json" - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - "golang.org/x/sync/errgroup" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" -) - -const ( - rootPool = ".rgw.root" - - // AppName is the name Rook uses for the object store's application - AppName = "rook-ceph-rgw" - bucketProvisionerName = "ceph.rook.io/bucket" - AccessKeyName = "access-key" - SecretKeyName = "secret-key" - svcDNSSuffix = "svc" -) - -var ( - metadataPools = []string{ - // .rgw.root (rootPool) is appended to this slice where needed - "rgw.control", - "rgw.meta", - "rgw.log", - "rgw.buckets.index", - "rgw.buckets.non-ec", - } - dataPoolName = "rgw.buckets.data" - - // An user with system privileges for dashboard service - DashboardUser = "dashboard-admin" -) - -type idType struct { - ID string `json:"id"` -} - -type zoneGroupType struct { - MasterZoneID string `json:"master_zone"` - IsMaster string `json:"is_master"` - Zones []zoneType `json:"zones"` -} - -type zoneType struct { - Name string `json:"name"` - Endpoints []string `json:"endpoints"` -} - -type realmType struct { - Realms []string `json:"realms"` -} - -func deleteRealmAndPools(objContext *Context, spec cephv1.ObjectStoreSpec) error { - if spec.IsMultisite() { - // since pools for object store are created by the zone, the object store only needs to be removed from the zone - err := removeObjectStoreFromMultisite(objContext, spec) - if err != nil { - return err - } - - return nil - } - - return deleteSingleSiteRealmAndPools(objContext, spec) -} - -func removeObjectStoreFromMultisite(objContext *Context, spec cephv1.ObjectStoreSpec) error { - // get list of endpoints not including the endpoint of the object-store for the zone - zoneEndpointsList, err := getZoneEndpoints(objContext, objContext.Endpoint) - if err != nil { - return err - } - - realmArg := fmt.Sprintf("--rgw-realm=%s", 
objContext.Realm) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) - zoneEndpoints := strings.Join(zoneEndpointsList, ",") - endpointArg := fmt.Sprintf("--endpoints=%s", zoneEndpoints) - - zoneIsMaster, err := checkZoneIsMaster(objContext) - if err != nil { - return errors.Wrap(err, "failed to find out zone in Master") - } - - zoneGroupIsMaster := false - if zoneIsMaster { - _, err = RunAdminCommandNoMultisite(objContext, false, "zonegroup", "modify", realmArg, zoneGroupArg, endpointArg) - if err != nil { - - if kerrors.IsNotFound(err) { - return err - } - return errors.Wrapf(err, "failed to remove object store %q endpoint from rgw zone group %q", objContext.Name, objContext.ZoneGroup) - } - logger.Debugf("endpoint %q was removed from zone group %q. the remaining endpoints in the zone group are %q", objContext.Endpoint, objContext.ZoneGroup, zoneEndpoints) - - // check if zone group is master only if zone is master for creating the system user - zoneGroupIsMaster, err = checkZoneGroupIsMaster(objContext) - if err != nil { - return errors.Wrapf(err, "failed to find out whether zone group %q in is the master zone group", objContext.ZoneGroup) - } - } - - _, err = runAdminCommand(objContext, false, "zone", "modify", endpointArg) - if err != nil { - return errors.Wrapf(err, "failed to remove object store %q endpoint from rgw zone %q", objContext.Name, spec.Zone.Name) - } - logger.Debugf("endpoint %q was removed from zone %q. the remaining endpoints in the zone are %q", objContext.Endpoint, objContext.Zone, zoneEndpoints) - - if zoneIsMaster && zoneGroupIsMaster && zoneEndpoints == "" { - logger.Infof("WARNING: No other zone in realm %q can commit to the period or pull the realm until you create another object-store in zone %q", objContext.Realm, objContext.Zone) - } - - // the period will help notify other zones of changes if there are multi-zones - _, err = runAdminCommand(objContext, false, "period", "update", "--commit") - if err != nil { - return errors.Wrap(err, "failed to update period after removing an endpoint from the zone") - } - logger.Infof("successfully updated period for realm %v after removal of object-store %v", objContext.Realm, objContext.Name) - - return nil -} - -func deleteSingleSiteRealmAndPools(objContext *Context, spec cephv1.ObjectStoreSpec) error { - stores, err := getObjectStores(objContext) - if err != nil { - return errors.Wrap(err, "failed to detect object stores during deletion") - } - if len(stores) == 0 { - logger.Infof("did not find object store %q, nothing to delete", objContext.Name) - return nil - } - logger.Infof("Found stores %v when deleting store %s", stores, objContext.Name) - - err = deleteRealm(objContext) - if err != nil { - return errors.Wrap(err, "failed to delete realm") - } - - lastStore := false - if len(stores) == 1 && stores[0] == objContext.Name { - lastStore = true - } - - if !spec.PreservePoolsOnDelete { - err = deletePools(objContext, spec, lastStore) - if err != nil { - return errors.Wrap(err, "failed to delete object store pools") - } - } else { - logger.Infof("PreservePoolsOnDelete is set in object store %s. 
Pools not deleted", objContext.Name) - } - - return nil -} - -// This is used for quickly getting the name of the realm, zone group, and zone for an object-store to pass into a Context -func getMultisiteForObjectStore(clusterdContext *clusterd.Context, spec *cephv1.ObjectStoreSpec, namespace, name string) (string, string, string, error) { - ctx := context.TODO() - if spec.IsMultisite() { - zone, err := clusterdContext.RookClientset.CephV1().CephObjectZones(namespace).Get(ctx, spec.Zone.Name, metav1.GetOptions{}) - if err != nil { - return "", "", "", errors.Wrapf(err, "failed to find zone for object-store %q", name) - } - - zonegroup, err := clusterdContext.RookClientset.CephV1().CephObjectZoneGroups(namespace).Get(ctx, zone.Spec.ZoneGroup, metav1.GetOptions{}) - if err != nil { - return "", "", "", errors.Wrapf(err, "failed to find zone group for object-store %q", name) - } - - realm, err := clusterdContext.RookClientset.CephV1().CephObjectRealms(namespace).Get(ctx, zonegroup.Spec.Realm, metav1.GetOptions{}) - if err != nil { - return "", "", "", errors.Wrapf(err, "failed to find realm for object-store %q", name) - } - - return realm.Name, zonegroup.Name, zone.Name, nil - } - - return name, name, name, nil -} - -func checkZoneIsMaster(objContext *Context) (bool, error) { - logger.Debugf("checking if zone %v is the master zone", objContext.Zone) - realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) - zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone) - - zoneGroupJson, err := RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg) - if err != nil { - // This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet - // The caller can nicely handle the error and not overflow the op logs with misleading error messages - if kerrors.IsNotFound(err) { - return false, err - } - return false, errors.Wrap(err, "failed to get rgw zone group") - } - zoneGroupOutput, err := DecodeZoneGroupConfig(zoneGroupJson) - if err != nil { - return false, errors.Wrap(err, "failed to parse zonegroup get json") - } - logger.Debugf("got master zone ID for zone group %v", objContext.ZoneGroup) - - zoneOutput, err := RunAdminCommandNoMultisite(objContext, true, "zone", "get", realmArg, zoneGroupArg, zoneArg) - if err != nil { - // This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet - // The caller can nicely handle the error and not overflow the op logs with misleading error messages - if kerrors.IsNotFound(err) { - return false, err - } - return false, errors.Wrap(err, "failed to get rgw zone") - } - zoneID, err := decodeID(zoneOutput) - if err != nil { - return false, errors.Wrap(err, "failed to parse zone id") - } - logger.Debugf("got zone ID for zone %v", objContext.Zone) - - if zoneID == zoneGroupOutput.MasterZoneID { - logger.Debugf("zone is master") - return true, nil - } - - logger.Debugf("zone is not master") - return false, nil -} - -func checkZoneGroupIsMaster(objContext *Context) (bool, error) { - logger.Debugf("checking if zone group %v is the master zone group", objContext.ZoneGroup) - realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) - - zoneGroupOutput, err := RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg) - if err != nil { - // This handles the case where the pod we use to 
exec command (act as a proxy) is not found/ready yet - // The caller can nicely handle the error and not overflow the op logs with misleading error messages - if kerrors.IsNotFound(err) { - return false, err - } - return false, errors.Wrap(err, "failed to get rgw zone group") - } - - zoneGroupJson, err := DecodeZoneGroupConfig(zoneGroupOutput) - if err != nil { - return false, errors.Wrap(err, "failed to parse master zone id") - } - - zoneGroupIsMaster, err := strconv.ParseBool(zoneGroupJson.IsMaster) - if err != nil { - return false, errors.Wrap(err, "failed to parse is_master from zone group json into bool") - } - - return zoneGroupIsMaster, nil -} - -func DecodeSecret(secret *v1.Secret, keyName string) (string, error) { - realmKey, ok := secret.Data[keyName] - - if !ok { - return "", errors.New("key was not in secret data") - } - - return string(realmKey), nil -} - -func GetRealmKeyArgs(clusterdContext *clusterd.Context, realmName, namespace string) (string, string, error) { - ctx := context.TODO() - logger.Debugf("getting keys for realm %v", realmName) - // get realm's access and secret keys - realmSecretName := realmName + "-keys" - realmSecret, err := clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, realmSecretName, metav1.GetOptions{}) - if err != nil { - return "", "", errors.Wrapf(err, "failed to get realm %q keys secret", realmName) - } - logger.Debugf("found keys secret for realm %v", realmName) - - accessKey, err := DecodeSecret(realmSecret, AccessKeyName) - if err != nil { - return "", "", errors.Wrapf(err, "failed to decode realm %q access key", realmName) - } - secretKey, err := DecodeSecret(realmSecret, SecretKeyName) - if err != nil { - return "", "", errors.Wrapf(err, "failed to decode realm %q access key", realmName) - } - logger.Debugf("decoded keys for realm %v", realmName) - - accessKeyArg := fmt.Sprintf("--access-key=%s", accessKey) - secretKeyArg := fmt.Sprintf("--secret-key=%s", secretKey) - - return accessKeyArg, secretKeyArg, nil -} - -func getZoneEndpoints(objContext *Context, serviceEndpoint string) ([]string, error) { - logger.Debugf("getting current endpoints for zone %v", objContext.Zone) - realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) - - zoneGroupOutput, err := RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg) - if err != nil { - // This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet - // The caller can nicely handle the error and not overflow the op logs with misleading error messages - return []string{}, errorOrIsNotFound(err, "failed to get rgw zone group %q", objContext.Name) - } - zoneGroupJson, err := DecodeZoneGroupConfig(zoneGroupOutput) - if err != nil { - return []string{}, errors.Wrap(err, "failed to parse zones list") - } - - zoneEndpointsList := []string{} - for _, zone := range zoneGroupJson.Zones { - if zone.Name == objContext.Zone { - for _, endpoint := range zone.Endpoints { - // in case object-store operator code is rereconciled, zone modify could get run again with serviceEndpoint added again - if endpoint != serviceEndpoint { - zoneEndpointsList = append(zoneEndpointsList, endpoint) - } - } - break - } - } - - return zoneEndpointsList, nil -} - -func createMultisite(objContext *Context, endpointArg string) error { - logger.Debugf("creating realm, zone group, zone for object-store %v", objContext.Name) - - realmArg := fmt.Sprintf("--rgw-realm=%s", 
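checkZoneIsMaster and checkZoneGroupIsMaster in the removed objectstore.go both come down to decoding radosgw-admin JSON and comparing IDs. A small sketch of that decode-and-compare step, assuming the same package so DecodeZoneGroupConfig and decodeID are in scope; the JSON literals are hand-written stand-ins for real `radosgw-admin zonegroup get` / `zone get` output:

```go
// Sketch only: assumes it sits in the same package as the deleted objectstore.go.
package object

func isMasterZoneExample() (bool, error) {
	// Stand-ins for radosgw-admin output; field names match the deleted struct tags.
	zoneGroupJSON := `{"master_zone":"f539c2c0","is_master":"true","zones":[{"name":"my-store","endpoints":["http://10.0.0.1:80"]}]}`
	zoneJSON := `{"id":"f539c2c0"}`

	zg, err := DecodeZoneGroupConfig(zoneGroupJSON)
	if err != nil {
		return false, err
	}
	zoneID, err := decodeID(zoneJSON)
	if err != nil {
		return false, err
	}
	// The zone is the master zone exactly when its ID matches the zonegroup's master_zone.
	return zoneID == zg.MasterZoneID, nil
}
```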
objContext.Realm) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) - - updatePeriod := false - // create the realm if it doesn't exist yet - output, err := RunAdminCommandNoMultisite(objContext, true, "realm", "get", realmArg) - if err != nil { - // ENOENT means “No such file or directory” - if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) { - updatePeriod = true - output, err = RunAdminCommandNoMultisite(objContext, false, "realm", "create", realmArg) - if err != nil { - return errorOrIsNotFound(err, "failed to create ceph realm %q, for reason %q", objContext.ZoneGroup, output) - } - logger.Debugf("created realm %v", objContext.Realm) - } else { - return errorOrIsNotFound(err, "radosgw-admin realm get failed with code %d, for reason %q. %v", strconv.Itoa(code), output, string(kerrors.ReasonForError(err))) - } - } - - // create the zonegroup if it doesn't exist yet - output, err = RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg) - if err != nil { - // ENOENT means “No such file or directory” - if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) { - updatePeriod = true - output, err = RunAdminCommandNoMultisite(objContext, false, "zonegroup", "create", "--master", realmArg, zoneGroupArg, endpointArg) - if err != nil { - return errorOrIsNotFound(err, "failed to create ceph zone group %q, for reason %q", objContext.ZoneGroup, output) - } - logger.Debugf("created zone group %v", objContext.ZoneGroup) - } else { - return errorOrIsNotFound(err, "radosgw-admin zonegroup get failed with code %d, for reason %q", strconv.Itoa(code), output) - } - } - - // create the zone if it doesn't exist yet - output, err = runAdminCommand(objContext, true, "zone", "get") - if err != nil { - // ENOENT means “No such file or directory” - if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) { - updatePeriod = true - output, err = runAdminCommand(objContext, false, "zone", "create", "--master", endpointArg) - if err != nil { - return errorOrIsNotFound(err, "failed to create ceph zone %q, for reason %q", objContext.Zone, output) - } - logger.Debugf("created zone %v", objContext.Zone) - } else { - return errorOrIsNotFound(err, "radosgw-admin zone get failed with code %d, for reason %q", strconv.Itoa(code), output) - } - } - - if updatePeriod { - // the period will help notify other zones of changes if there are multi-zones - _, err := runAdminCommand(objContext, false, "period", "update", "--commit") - if err != nil { - return errorOrIsNotFound(err, "failed to update period") - } - logger.Debugf("updated period for realm %v", objContext.Realm) - } - - logger.Infof("Multisite for object-store: realm=%s, zonegroup=%s, zone=%s", objContext.Realm, objContext.ZoneGroup, objContext.Zone) - - return nil -} - -func joinMultisite(objContext *Context, endpointArg, zoneEndpoints, namespace string) error { - logger.Debugf("joining zone %v", objContext.Zone) - realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) - zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone) - - zoneIsMaster, err := checkZoneIsMaster(objContext) - if err != nil { - return err - } - zoneGroupIsMaster := false - - if zoneIsMaster { - // endpoints that are part of a master zone are supposed to be the endpoints for a zone group - _, err := RunAdminCommandNoMultisite(objContext, false, "zonegroup", "modify", 
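createMultisite above treats a radosgw-admin exit code of ENOENT as "this realm/zonegroup/zone does not exist yet, create it". Here is a stripped-down sketch of that branch, assuming the same package and the exec/errors imports already used by the removed file; getCmd and createCmd are hypothetical stand-ins for the RunAdminCommandNoMultisite calls:

```go
// Sketch of the "get, and create on ENOENT" pattern used for realm, zonegroup,
// and zone in the deleted code.
package object

import (
	"syscall"

	"github.com/pkg/errors"
	"github.com/rook/rook/pkg/util/exec"
)

func ensureExists(getCmd, createCmd func() (string, error)) error {
	if _, err := getCmd(); err != nil {
		code, extractErr := exec.ExtractExitCode(err)
		if extractErr == nil && code == int(syscall.ENOENT) {
			// ENOENT from radosgw-admin means the entity is missing: create it.
			if out, createErr := createCmd(); createErr != nil {
				return errors.Wrapf(createErr, "failed to create entity, output: %q", out)
			}
			return nil
		}
		return err
	}
	return nil
}
```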
realmArg, zoneGroupArg, endpointArg) - if err != nil { - return errorOrIsNotFound(err, "failed to add object store %q in rgw zone group %q", objContext.Name, objContext.ZoneGroup) - } - logger.Debugf("endpoints for zonegroup %q are now %q", objContext.ZoneGroup, zoneEndpoints) - - // check if zone group is master only if zone is master for creating the system user - zoneGroupIsMaster, err = checkZoneGroupIsMaster(objContext) - if err != nil { - return errors.Wrapf(err, "failed to find out whether zone group %q in is the master zone group", objContext.ZoneGroup) - } - } - _, err = RunAdminCommandNoMultisite(objContext, false, "zone", "modify", realmArg, zoneGroupArg, zoneArg, endpointArg) - if err != nil { - return errorOrIsNotFound(err, "failed to add object store %q in rgw zone %q", objContext.Name, objContext.Zone) - } - logger.Debugf("endpoints for zone %q are now %q", objContext.Zone, zoneEndpoints) - - // the period will help notify other zones of changes if there are multi-zones - _, err = RunAdminCommandNoMultisite(objContext, false, "period", "update", "--commit", realmArg, zoneGroupArg, zoneArg) - if err != nil { - return errorOrIsNotFound(err, "failed to update period") - } - logger.Infof("added object store %q to realm %q, zonegroup %q, zone %q", objContext.Name, objContext.Realm, objContext.ZoneGroup, objContext.Zone) - - // create system user for realm for master zone in master zonegorup for multisite scenario - if zoneIsMaster && zoneGroupIsMaster { - err = createSystemUser(objContext, namespace) - if err != nil { - return err - } - } - - return nil -} - -func createSystemUser(objContext *Context, namespace string) error { - uid := objContext.Realm + "-system-user" - uidArg := fmt.Sprintf("--uid=%s", uid) - realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) - zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone) - - output, err := RunAdminCommandNoMultisite(objContext, false, "user", "info", uidArg) - if err == nil { - logger.Debugf("realm system user %q has already been created", uid) - return nil - } - - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.EINVAL) { - logger.Debugf("realm system user %q not found, running `radosgw-admin user create`", uid) - accessKeyArg, secretKeyArg, err := GetRealmKeyArgs(objContext.Context, objContext.Realm, namespace) - if err != nil { - return errors.Wrap(err, "failed to get keys for realm") - } - logger.Debugf("found keys to create realm system user %v", uid) - systemArg := "--system" - displayNameArg := fmt.Sprintf("--display-name=%s.user", objContext.Realm) - output, err = RunAdminCommandNoMultisite(objContext, false, "user", "create", realmArg, zoneGroupArg, zoneArg, uidArg, displayNameArg, accessKeyArg, secretKeyArg, systemArg) - if err != nil { - return errorOrIsNotFound(err, "failed to create realm system user %q for reason: %q", uid, output) - } - logger.Debugf("created realm system user %v", uid) - } else { - return errorOrIsNotFound(err, "radosgw-admin user info for system user failed with code %d and output %q", strconv.Itoa(code), output) - } - - return nil -} - -func setMultisite(objContext *Context, store *cephv1.CephObjectStore, serviceIP string) error { - logger.Debugf("setting multisite configuration for object-store %v", store.Name) - serviceEndpoint := fmt.Sprintf("http://%s:%d", serviceIP, store.Spec.Gateway.Port) - if store.Spec.Gateway.SecurePort != 0 { - serviceEndpoint = fmt.Sprintf("https://%s:%d", serviceIP, 
store.Spec.Gateway.SecurePort) - } - - if store.Spec.IsMultisite() { - zoneEndpointsList, err := getZoneEndpoints(objContext, serviceEndpoint) - if err != nil { - return err - } - zoneEndpointsList = append(zoneEndpointsList, serviceEndpoint) - - zoneEndpoints := strings.Join(zoneEndpointsList, ",") - logger.Debugf("Endpoints for zone %q are: %q", objContext.Zone, zoneEndpoints) - endpointArg := fmt.Sprintf("--endpoints=%s", zoneEndpoints) - - err = joinMultisite(objContext, endpointArg, zoneEndpoints, store.Namespace) - if err != nil { - return errors.Wrapf(err, "failed join ceph multisite in zone %q", objContext.Zone) - } - } else { - endpointArg := fmt.Sprintf("--endpoints=%s", serviceEndpoint) - err := createMultisite(objContext, endpointArg) - if err != nil { - return errorOrIsNotFound(err, "failed create ceph multisite for object-store %q", objContext.Name) - } - } - - logger.Infof("multisite configuration for object-store %v is complete", store.Name) - return nil -} - -func deleteRealm(context *Context) error { - // - realmArg := fmt.Sprintf("--rgw-realm=%s", context.Name) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", context.Name) - _, err := RunAdminCommandNoMultisite(context, false, "realm", "delete", realmArg) - if err != nil { - logger.Warningf("failed to delete rgw realm %q. %v", context.Name, err) - } - - _, err = RunAdminCommandNoMultisite(context, false, "zonegroup", "delete", realmArg, zoneGroupArg) - if err != nil { - logger.Warningf("failed to delete rgw zonegroup %q. %v", context.Name, err) - } - - _, err = runAdminCommand(context, false, "zone", "delete") - if err != nil { - logger.Warningf("failed to delete rgw zone %q. %v", context.Name, err) - } - - return nil -} - -func decodeID(data string) (string, error) { - var id idType - err := json.Unmarshal([]byte(data), &id) - if err != nil { - return "", errors.Wrap(err, "failed to unmarshal json") - } - - return id.ID, err -} - -func DecodeZoneGroupConfig(data string) (zoneGroupType, error) { - var config zoneGroupType - err := json.Unmarshal([]byte(data), &config) - if err != nil { - return config, errors.Wrap(err, "failed to unmarshal json") - } - - return config, err -} - -func getObjectStores(context *Context) ([]string, error) { - output, err := RunAdminCommandNoMultisite(context, true, "realm", "list") - if err != nil { - // This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet - // The caller can nicely handle the error and not overflow the op logs with misleading error messages - if kerrors.IsNotFound(err) { - return []string{}, err - } - // exit status 2 indicates the object store does not exist, so return nothing - if strings.Index(err.Error(), "exit status 2") == 0 { - return []string{}, nil - } - return nil, err - } - - var r realmType - err = json.Unmarshal([]byte(output), &r) - if err != nil { - return nil, errors.Wrap(err, "Failed to unmarshal realms") - } - - return r.Realms, nil -} - -func deletePools(ctx *Context, spec cephv1.ObjectStoreSpec, lastStore bool) error { - if emptyPool(spec.DataPool) && emptyPool(spec.MetadataPool) { - logger.Info("skipping removal of pools since not specified in the object store") - return nil - } - - pools := append(metadataPools, dataPoolName) - if lastStore { - pools = append(pools, rootPool) - } - - if configurePoolsConcurrently() { - waitGroup, _ := errgroup.WithContext(context.TODO()) - for _, pool := range pools { - name := poolName(ctx.Name, pool) - waitGroup.Go(func() error { - if err := 
cephclient.DeletePool(ctx.Context, ctx.clusterInfo, name); err != nil { - return errors.Wrapf(err, "failed to delete pool %q. ", name) - } - return nil - }, - ) - } - - // Wait for all the pools to be deleted - if err := waitGroup.Wait(); err != nil { - logger.Warning(err) - } - - } else { - for _, pool := range pools { - name := poolName(ctx.Name, pool) - if err := cephclient.DeletePool(ctx.Context, ctx.clusterInfo, name); err != nil { - logger.Warningf("failed to delete pool %q. %v", name, err) - } - } - } - - // Delete erasure code profile if any - erasureCodes, err := cephclient.ListErasureCodeProfiles(ctx.Context, ctx.clusterInfo) - if err != nil { - return errors.Wrapf(err, "failed to list erasure code profiles for cluster %s", ctx.clusterInfo.Namespace) - } - // cleans up the EC profile for the data pool only. Metadata pools don't support EC (only replication is supported). - ecProfileName := cephclient.GetErasureCodeProfileForPool(ctx.Name) - for i := range erasureCodes { - if erasureCodes[i] == ecProfileName { - if err := cephclient.DeleteErasureCodeProfile(ctx.Context, ctx.clusterInfo, ecProfileName); err != nil { - return errors.Wrapf(err, "failed to delete erasure code profile %s for object store %s", ecProfileName, ctx.Name) - } - break - } - } - - return nil -} - -func allObjectPools(storeName string) []string { - baseObjPools := append(metadataPools, dataPoolName, rootPool) - - poolsForThisStore := make([]string, 0, len(baseObjPools)) - for _, p := range baseObjPools { - poolsForThisStore = append(poolsForThisStore, poolName(storeName, p)) - } - return poolsForThisStore -} - -func missingPools(context *Context) ([]string, error) { - // list pools instead of querying each pool individually. querying each individually makes it - // hard to determine if an error is because the pool does not exist or because of a connection - // issue with ceph mons (or some other underlying issue). if listing pools fails, we can be sure - // it is a connection issue and return an error. - existingPoolSummaries, err := cephclient.ListPoolSummaries(context.Context, context.clusterInfo) - if err != nil { - return []string{}, errors.Wrapf(err, "failed to determine if pools are missing. failed to list pools") - } - existingPools := sets.NewString() - for _, summary := range existingPoolSummaries { - existingPools.Insert(summary.Name) - } - - missingPools := []string{} - for _, objPool := range allObjectPools(context.Name) { - if !existingPools.Has(objPool) { - missingPools = append(missingPools, objPool) - } - } - - return missingPools, nil -} - -func CreatePools(context *Context, clusterSpec *cephv1.ClusterSpec, metadataPool, dataPool cephv1.PoolSpec) error { - if emptyPool(dataPool) && emptyPool(metadataPool) { - logger.Info("no pools specified for the CR, checking for their existence...") - missingPools, err := missingPools(context) - if err != nil { - return err - } - if len(missingPools) > 0 { - return fmt.Errorf("CR store pools are missing: %v", missingPools) - } - } - - // get the default PG count for rgw metadata pools - metadataPoolPGs, err := config.GetMonStore(context.Context, context.clusterInfo).Get("mon.", "rgw_rados_pool_pg_num_min") - if err != nil { - logger.Warningf("failed to adjust the PG count for rgw metadata pools. using the general default. 
%v", err) - metadataPoolPGs = cephclient.DefaultPGCount - } - - if err := createSimilarPools(context, append(metadataPools, rootPool), clusterSpec, metadataPool, metadataPoolPGs, ""); err != nil { - return errors.Wrap(err, "failed to create metadata pools") - } - - ecProfileName := "" - if dataPool.IsErasureCoded() { - ecProfileName = cephclient.GetErasureCodeProfileForPool(context.Name) - // create a new erasure code profile for the data pool - if err := cephclient.CreateErasureCodeProfile(context.Context, context.clusterInfo, ecProfileName, dataPool); err != nil { - return errors.Wrap(err, "failed to create erasure code profile") - } - } - - if err := createSimilarPools(context, []string{dataPoolName}, clusterSpec, dataPool, cephclient.DefaultPGCount, ecProfileName); err != nil { - return errors.Wrap(err, "failed to create data pool") - } - - return nil -} - -// configurePoolsConcurrently checks if operator pod resources are set or not -func configurePoolsConcurrently() bool { - // if operator resources are specified return false as it will lead to operator pod killed due to resource limit - // nolint #S1008, we can safely suppress this - if os.Getenv("OPERATOR_RESOURCES_SPECIFIED") == "true" { - return false - } - return true -} - -func createSimilarPools(ctx *Context, pools []string, clusterSpec *cephv1.ClusterSpec, poolSpec cephv1.PoolSpec, pgCount, ecProfileName string) error { - // We have concurrency - if configurePoolsConcurrently() { - waitGroup, _ := errgroup.WithContext(context.TODO()) - for _, pool := range pools { - // Avoid the loop re-using the same value with a closure - pool := pool - - waitGroup.Go(func() error { return createRGWPool(ctx, clusterSpec, poolSpec, pgCount, ecProfileName, pool) }) - } - return waitGroup.Wait() - } - - // No concurrency! - for _, pool := range pools { - err := createRGWPool(ctx, clusterSpec, poolSpec, pgCount, ecProfileName, pool) - if err != nil { - return err - } - } - - return nil -} - -func createRGWPool(ctx *Context, clusterSpec *cephv1.ClusterSpec, poolSpec cephv1.PoolSpec, pgCount, ecProfileName, pool string) error { - // create the pool if it doesn't exist yet - name := poolName(ctx.Name, pool) - if poolDetails, err := cephclient.GetPoolDetails(ctx.Context, ctx.clusterInfo, name); err != nil { - // If the ceph config has an EC profile, an EC pool must be created. Otherwise, it's necessary - // to create a replicated pool. - var err error - if poolSpec.IsErasureCoded() { - // An EC pool backing an object store does not need to enable EC overwrites, so the pool is - // created with that property disabled to avoid unnecessary performance impact. 
- err = cephclient.CreateECPoolForApp(ctx.Context, ctx.clusterInfo, name, ecProfileName, poolSpec, pgCount, AppName, false /* enableECOverwrite */) - } else { - err = cephclient.CreateReplicatedPoolForApp(ctx.Context, ctx.clusterInfo, clusterSpec, name, poolSpec, pgCount, AppName) - } - if err != nil { - return errors.Wrapf(err, "failed to create pool %s for object store %s.", name, ctx.Name) - } - } else { - // pools already exist - if poolSpec.IsReplicated() { - // detect if the replication is different from the pool details - if poolDetails.Size != poolSpec.Replicated.Size { - logger.Infof("pool size is changed from %d to %d", poolDetails.Size, poolSpec.Replicated.Size) - if err := cephclient.SetPoolReplicatedSizeProperty(ctx.Context, ctx.clusterInfo, poolDetails.Name, strconv.FormatUint(uint64(poolSpec.Replicated.Size), 10)); err != nil { - return errors.Wrapf(err, "failed to set size property to replicated pool %q to %d", poolDetails.Name, poolSpec.Replicated.Size) - } - } - } - } - // Set the pg_num_min if not the default so the autoscaler won't immediately increase the pg count - if pgCount != cephclient.DefaultPGCount { - if err := cephclient.SetPoolProperty(ctx.Context, ctx.clusterInfo, name, "pg_num_min", pgCount); err != nil { - return errors.Wrapf(err, "failed to set pg_num_min on pool %q to %q", name, pgCount) - } - } - - return nil -} - -func poolName(storeName, poolName string) string { - if strings.HasPrefix(poolName, ".") { - return poolName - } - // the name of the pool is ., except for the pool ".rgw.root" that spans object stores - return fmt.Sprintf("%s.%s", storeName, poolName) -} - -// GetObjectBucketProvisioner returns the bucket provisioner name appended with operator namespace if OBC is watching on it -func GetObjectBucketProvisioner(c *clusterd.Context, namespace string) string { - provName := bucketProvisionerName - obcWatchOnNamespace, err := k8sutil.GetOperatorSetting(c.Clientset, opcontroller.OperatorSettingConfigMapName, "ROOK_OBC_WATCH_OPERATOR_NAMESPACE", "false") - if err != nil { - logger.Warning("failed to verify if obc should watch the operator namespace or all of them, watching all") - } else { - if strings.EqualFold(obcWatchOnNamespace, "true") { - provName = fmt.Sprintf("%s.%s", namespace, bucketProvisionerName) - } - } - return provName -} - -// CheckDashboardUser returns true if the user is configure else return false -func checkDashboardUser(context *Context) (bool, error) { - args := []string{"dashboard", "get-rgw-api-access-key"} - cephCmd := cephclient.NewCephCommand(context.Context, context.clusterInfo, args) - out, err := cephCmd.Run() - - if string(out) != "" { - return true, err - } - - return false, err -} - -func enableRGWDashboard(context *Context) error { - logger.Info("enabling rgw dashboard") - checkDashboard, err := checkDashboardUser(context) - if err != nil { - logger.Debug("Unable to fetch dashboard user key for RGW, hence skipping") - return nil - } - if checkDashboard { - logger.Debug("RGW Dashboard is already enabled") - return nil - } - user := ObjectUser{ - UserID: DashboardUser, - DisplayName: &DashboardUser, - SystemUser: true, - } - // TODO: - // Use admin ops user instead! 
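// --- Illustrative sketch, not part of the original patch ---
// GetObjectBucketProvisioner above prefixes the provisioner name with the operator
// namespace only when ROOK_OBC_WATCH_OPERATOR_NAMESPACE resolves to "true". A minimal,
// self-contained version of that naming rule (the constant value is assumed here, not
// taken from the patch):
package main

import (
	"fmt"
	"strings"
)

const bucketProvisionerName = "ceph.rook.io/bucket" // assumed value, for illustration only

func provisionerName(operatorNamespace, obcWatchOperatorNamespace string) string {
	if strings.EqualFold(obcWatchOperatorNamespace, "true") {
		return fmt.Sprintf("%s.%s", operatorNamespace, bucketProvisionerName)
	}
	return bucketProvisionerName
}

func main() {
	fmt.Println(provisionerName("rook-ceph", "true"))  // rook-ceph.ceph.rook.io/bucket
	fmt.Println(provisionerName("rook-ceph", "false")) // ceph.rook.io/bucket
}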
- u, errCode, err := CreateUser(context, user) - if err != nil || errCode != 0 { - return errors.Wrapf(err, "failed to create user %q", DashboardUser) - } - - var accessArgs, secretArgs []string - var secretFile *os.File - - // for latest Ceph versions - if mgr.FileBasedPasswordSupported(context.clusterInfo) { - accessFile, err := mgr.CreateTempPasswordFile(*u.AccessKey) - if err != nil { - return errors.Wrap(err, "failed to create a temporary dashboard access-key file") - } - - accessArgs = []string{"dashboard", "set-rgw-api-access-key", "-i", accessFile.Name()} - defer func() { - if err := os.Remove(accessFile.Name()); err != nil { - logger.Errorf("failed to clean up dashboard access-key file. %v", err) - } - }() - - secretFile, err = mgr.CreateTempPasswordFile(*u.SecretKey) - if err != nil { - return errors.Wrap(err, "failed to create a temporary dashboard secret-key file") - } - - secretArgs = []string{"dashboard", "set-rgw-api-secret-key", "-i", secretFile.Name()} - } else { - // for older Ceph versions - accessArgs = []string{"dashboard", "set-rgw-api-access-key", *u.AccessKey} - secretArgs = []string{"dashboard", "set-rgw-api-secret-key", *u.SecretKey} - } - - cephCmd := cephclient.NewCephCommand(context.Context, context.clusterInfo, accessArgs) - _, err = cephCmd.Run() - if err != nil { - return errors.Wrapf(err, "failed to set user %q accesskey", DashboardUser) - } - - cephCmd = cephclient.NewCephCommand(context.Context, context.clusterInfo, secretArgs) - go func() { - // Setting the dashboard api secret started hanging in some clusters - // starting in ceph v15.2.8. We run it in a goroutine until the fix - // is found. We expect the ceph command to timeout so at least the goroutine exits. - logger.Info("setting the dashboard api secret key") - _, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout) - if err != nil { - logger.Errorf("failed to set user %q secretkey. %v", DashboardUser, err) - } - if mgr.FileBasedPasswordSupported(context.clusterInfo) { - if err := os.Remove(secretFile.Name()); err != nil { - logger.Errorf("failed to clean up dashboard secret-key file. %v", err) - } - } - - logger.Info("done setting the dashboard api secret key") - }() - - return nil -} - -func disableRGWDashboard(context *Context) { - logger.Info("disabling the dashboard api user and secret key") - - _, _, err := GetUser(context, DashboardUser) - if err != nil { - logger.Infof("unable to fetch the user %q details from this objectstore %q", DashboardUser, context.Name) - } else { - logger.Info("deleting rgw dashboard user") - _, err = DeleteUser(context, DashboardUser) - if err != nil { - logger.Warningf("failed to delete ceph user %q. %v", DashboardUser, err) - } - } - - args := []string{"dashboard", "reset-rgw-api-access-key"} - cephCmd := cephclient.NewCephCommand(context.Context, context.clusterInfo, args) - _, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout) - if err != nil { - logger.Warningf("failed to reset user accesskey for user %q. %v", DashboardUser, err) - } - - args = []string{"dashboard", "reset-rgw-api-secret-key"} - cephCmd = cephclient.NewCephCommand(context.Context, context.clusterInfo, args) - _, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout) - if err != nil { - logger.Warningf("failed to reset user secretkey for user %q. 
%v", DashboardUser, err) - } - logger.Info("done disabling the dashboard api secret key") -} - -func errorOrIsNotFound(err error, msg string, args ...string) error { - // This handles the case where the pod we use to exec command (act as a proxy) is not found/ready yet - // The caller can nicely handle the error and not overflow the op logs with misleading error messages - if kerrors.IsNotFound(err) { - return err - } - return errors.Wrapf(err, msg, args) -} diff --git a/pkg/operator/ceph/object/objectstore_test.go b/pkg/operator/ceph/object/objectstore_test.go deleted file mode 100644 index 4c79be443..000000000 --- a/pkg/operator/ceph/object/objectstore_test.go +++ /dev/null @@ -1,299 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/k8sutil" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" -) - -const ( - dashboardAdminCreateJSON = `{ - "user_id": "dashboard-admin", - "display_name": "dashboard-admin", - "email": "", - "suspended": 0, - "max_buckets": 1000, - "subusers": [], - "keys": [ - { - "user": "dashboard-admin", - "access_key": "VFKF8SSU9L3L2UR03Z8C", - "secret_key": "5U4e2MkXHgXstfWkxGZOI6AXDfVUkDDHM7Dwc3mY" - } - ], - "swift_keys": [], - "caps": [], - "op_mask": "read, write, delete", - "system": "true", - "temp_url_keys": [], - "type": "rgw", - "mfa_ids": [] -}` - access_key = "VFKF8SSU9L3L2UR03Z8C" -) - -func TestReconcileRealm(t *testing.T) { - executorFunc := func(command string, args ...string) (string, error) { - idResponse := `{"id":"test-id"}` - logger.Infof("Execute: %s %v", command, args) - return idResponse, nil - } - executorFuncTimeout := func(timeout time.Duration, command string, args ...string) (string, error) { - testResponse := `{"id": "test-id"}` - logger.Infof("Execute: %s %v", command, args) - return testResponse, nil - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: executorFunc, - MockExecuteCommandWithCombinedOutput: executorFunc, - MockExecuteCommandWithTimeout: executorFuncTimeout, - } - - storeName := "myobject" - context := &clusterd.Context{Executor: executor} - objContext := NewContext(context, &client.ClusterInfo{Namespace: "mycluster"}, storeName) - // create the first realm, marked as default - store := cephv1.CephObjectStore{} - err := setMultisite(objContext, &store, "1.2.3.4") - assert.Nil(t, err) - - // create the second realm, not marked as default - err = setMultisite(objContext, &store, "2.3.4.5") - assert.Nil(t, err) -} - -func TestDeleteStore(t *testing.T) { - deleteStore(t, 
"myobj", `"mystore","myobj"`, false) - deleteStore(t, "myobj", `"myobj"`, true) -} - -func deleteStore(t *testing.T, name string, existingStores string, expectedDeleteRootPool bool) { - realmDeleted := false - zoneDeleted := false - zoneGroupDeleted := false - poolsDeleted := 0 - rulesDeleted := 0 - executor := &exectest.MockExecutor{} - deletedRootPool := false - deletedErasureCodeProfile := false - mockExecutorFuncOutput := func(command string, args ...string) (string, error) { - if args[0] == "osd" { - if args[1] == "pool" { - if args[2] == "get" { - return `{"pool_id":1}`, nil - } - if args[2] == "delete" { - poolsDeleted++ - if args[3] == rootPool { - deletedRootPool = true - } - return "", nil - } - } - if args[1] == "crush" { - assert.Equal(t, "rule", args[2]) - assert.Equal(t, "rm", args[3]) - rulesDeleted++ - return "", nil - } - if args[1] == "erasure-code-profile" { - if args[2] == "ls" { - return `["default","myobj_ecprofile"]`, nil - } - if args[2] == "rm" { - if args[3] == "myobj_ecprofile" { - deletedErasureCodeProfile = true - } else { - assert.Fail(t, fmt.Sprintf("the erasure code profile to be deleted should be myobj_ecprofile. Actual: %s ", args[3])) - } - return "", nil - } - } - } - if args[0] == "realm" { - if args[1] == "delete" { - realmDeleted = true - return "", nil - } - if args[1] == "list" { - return fmt.Sprintf(`{"realms":[%s]}`, existingStores), nil - } - } - if args[0] == "zonegroup" { - assert.Equal(t, "delete", args[1]) - zoneGroupDeleted = true - return "", nil - } - if args[0] == "zone" { - assert.Equal(t, "delete", args[1]) - zoneDeleted = true - return "", nil - } - - if args[0] == "pool" { - if args[1] == "stats" { - emptyPool := "{\"images\":{\"count\":0,\"provisioned_bytes\":0,\"snap_count\":0},\"trash\":{\"count\":1,\"provisioned_bytes\":2048,\"snap_count\":0}}" - return emptyPool, nil - } - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - executorFuncWithTimeout := func(timeout time.Duration, command string, args ...string) (string, error) { - return mockExecutorFuncOutput(command, args...) - } - executorFunc := func(command string, args ...string) (string, error) { - return mockExecutorFuncOutput(command, args...) 
- } - - executor.MockExecuteCommandWithTimeout = executorFuncWithTimeout - executor.MockExecuteCommandWithOutput = executorFunc - executor.MockExecuteCommandWithCombinedOutput = executorFunc - context := &Context{Context: &clusterd.Context{Executor: executor}, Name: "myobj", clusterInfo: &client.ClusterInfo{Namespace: "ns"}} - - // Delete an object store without deleting the pools - spec := cephv1.ObjectStoreSpec{} - err := deleteRealmAndPools(context, spec) - assert.Nil(t, err) - expectedPoolsDeleted := 0 - assert.Equal(t, expectedPoolsDeleted, poolsDeleted) - assert.Equal(t, expectedPoolsDeleted, rulesDeleted) - assert.True(t, realmDeleted) - assert.True(t, zoneGroupDeleted) - assert.True(t, zoneDeleted) - assert.Equal(t, false, deletedErasureCodeProfile) - - // Delete an object store with the pools - spec = cephv1.ObjectStoreSpec{ - MetadataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1}}, - DataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1}}, - } - err = deleteRealmAndPools(context, spec) - assert.Nil(t, err) - expectedPoolsDeleted = 6 - if expectedDeleteRootPool { - expectedPoolsDeleted++ - } - assert.Equal(t, expectedPoolsDeleted, poolsDeleted) - assert.Equal(t, expectedDeleteRootPool, deletedRootPool) - assert.Equal(t, true, deletedErasureCodeProfile) -} - -func TestGetObjectBucketProvisioner(t *testing.T) { - ctx := context.TODO() - k8s := fake.NewSimpleClientset() - operatorSettingConfigMapName := "rook-ceph-operator-config" - testNamespace := "test-namespace" - watchOperatorNamespace := map[string]string{"ROOK_OBC_WATCH_OPERATOR_NAMESPACE": "true"} - ignoreOperatorNamespace := map[string]string{"ROOK_OBC_WATCH_OPERATOR_NAMESPACE": "false"} - context := &clusterd.Context{Clientset: k8s} - os.Setenv(k8sutil.PodNamespaceEnvVar, testNamespace) - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: operatorSettingConfigMapName, - Namespace: testNamespace, - }, - Data: watchOperatorNamespace, - } - - _, err := k8s.CoreV1().ConfigMaps(testNamespace).Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - - bktprovisioner := GetObjectBucketProvisioner(context, testNamespace) - assert.Equal(t, fmt.Sprintf("%s.%s", testNamespace, bucketProvisionerName), bktprovisioner) - - cm.Data = ignoreOperatorNamespace - _, err = k8s.CoreV1().ConfigMaps(testNamespace).Update(ctx, cm, metav1.UpdateOptions{}) - assert.NoError(t, err) - - bktprovisioner = GetObjectBucketProvisioner(context, testNamespace) - assert.Equal(t, bucketProvisionerName, bktprovisioner) -} - -func TestDashboard(t *testing.T) { - storeName := "myobject" - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "user" { - return dashboardAdminCreateJSON, nil - } - return "", nil - }, - } - context := &clusterd.Context{Executor: executor} - objContext := NewContext(context, &client.ClusterInfo{Namespace: "mycluster", - CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 9}}, - storeName) - checkdashboard, err := checkDashboardUser(objContext) - assert.NoError(t, err) - assert.False(t, checkdashboard) - err = enableRGWDashboard(objContext) - assert.Nil(t, err) - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "dashboard" && args[1] == "get-rgw-api-access-key" { - return 
access_key, nil - } - return "", nil - }, - } - objContext.Context.Executor = executor - checkdashboard, err = checkDashboardUser(objContext) - assert.NoError(t, err) - assert.True(t, checkdashboard) - disableRGWDashboard(objContext) - - context = &clusterd.Context{Executor: executor} - objContext = NewContext(context, &client.ClusterInfo{Namespace: "mycluster", - CephVersion: cephver.CephVersion{Major: 15, Minor: 2, Extra: 10}}, - storeName) - err = enableRGWDashboard(objContext) - assert.Nil(t, err) - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "dashboard" && args[1] == "get-rgw-api-access-key" { - return access_key, nil - } - return "", nil - }, - } - objContext.Context.Executor = executor - checkdashboard, err = checkDashboardUser(objContext) - assert.NoError(t, err) - assert.True(t, checkdashboard) - disableRGWDashboard(objContext) -} diff --git a/pkg/operator/ceph/object/policy.go b/pkg/operator/ceph/object/policy.go deleted file mode 100644 index 647b1933f..000000000 --- a/pkg/operator/ceph/object/policy.go +++ /dev/null @@ -1,325 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/service/s3" - "k8s.io/apimachinery/pkg/util/json" -) - -type action string - -const ( - All action = "s3:*" - AbortMultipartUpload action = "s3:AbortMultipartUpload" - CreateBucket action = "s3:CreateBucket" - DeleteBucketPolicy action = "s3:DeleteBucketPolicy" - DeleteBucket action = "s3:DeleteBucket" - DeleteBucketWebsite action = "s3:DeleteBucketWebsite" - DeleteObject action = "s3:DeleteObject" - DeleteObjectVersion action = "s3:DeleteObjectVersion" - DeleteReplicationConfiguration action = "s3:DeleteReplicationConfiguration" - GetAccelerateConfiguration action = "s3:GetAccelerateConfiguration" - GetBucketAcl action = "s3:GetBucketAcl" - GetBucketCORS action = "s3:GetBucketCORS" - GetBucketLocation action = "s3:GetBucketLocation" - GetBucketLogging action = "s3:GetBucketLogging" - GetBucketNotification action = "s3:GetBucketNotification" - GetBucketPolicy action = "s3:GetBucketPolicy" - GetBucketRequestPayment action = "s3:GetBucketRequestPayment" - GetBucketTagging action = "s3:GetBucketTagging" - GetBucketVersioning action = "s3:GetBucketVersioning" - GetBucketWebsite action = "s3:GetBucketWebsite" - GetLifecycleConfiguration action = "s3:GetLifecycleConfiguration" - GetObjectAcl action = "s3:GetObjectAcl" - GetObject action = "s3:GetObject" - GetObjectTorrent action = "s3:GetObjectTorrent" - GetObjectVersionAcl action = "s3:GetObjectVersionAcl" - GetObjectVersion action = "s3:GetObjectVersion" - GetObjectVersionTorrent action = "s3:GetObjectVersionTorrent" - GetReplicationConfiguration action = "s3:GetReplicationConfiguration" - ListAllMyBuckets action = "s3:ListAllMyBuckets" - ListBucketMultiPartUploads action = "s3:ListBucketMultiPartUploads" - ListBucket action = "s3:ListBucket" - ListBucketVersions action = 
"s3:ListBucketVersions" - ListMultipartUploadParts action = "s3:ListMultipartUploadParts" - PutAccelerateConfiguration action = "s3:PutAccelerateConfiguration" - PutBucketAcl action = "s3:PutBucketAcl" - PutBucketCORS action = "s3:PutBucketCORS" - PutBucketLogging action = "s3:PutBucketLogging" - PutBucketNotification action = "s3:PutBucketNotification" - PutBucketPolicy action = "s3:PutBucketPolicy" - PutBucketRequestPayment action = "s3:PutBucketRequestPayment" - PutBucketTagging action = "s3:PutBucketTagging" - PutBucketVersioning action = "s3:PutBucketVersioning" - PutBucketWebsite action = "s3:PutBucketWebsite" - PutLifecycleConfiguration action = "s3:PutLifecycleConfiguration" - PutObjectAcl action = "s3:PutObjectAcl" - PutObject action = "s3:PutObject" - PutObjectVersionAcl action = "s3:PutObjectVersionAcl" - PutReplicationConfiguration action = "s3:PutReplicationConfiguration" - RestoreObject action = "s3:RestoreObject" -) - -// AllowedActions is a lenient default list of actions -var AllowedActions = []action{ - DeleteObject, - DeleteObjectVersion, - GetBucketAcl, - GetBucketCORS, - GetBucketLocation, - GetBucketLogging, - GetBucketNotification, - GetBucketTagging, - GetBucketVersioning, - GetBucketWebsite, - GetObject, - GetObjectAcl, - GetObjectTorrent, - GetObjectVersion, - GetObjectVersionAcl, - GetObjectVersionTorrent, - ListAllMyBuckets, - ListBucket, - ListBucketMultiPartUploads, - ListBucketVersions, - ListMultipartUploadParts, - PutBucketTagging, - PutBucketVersioning, - PutBucketWebsite, - PutBucketVersioning, - PutLifecycleConfiguration, - PutObject, - PutObjectAcl, - PutObjectVersionAcl, - PutReplicationConfiguration, - RestoreObject, -} - -type effect string - -// effectAllow and effectDeny values are expected by the S3 API to be 'Allow' or 'Deny' explicitly -const ( - effectAllow effect = "Allow" - effectDeny effect = "Deny" -) - -// PolicyStatment is the Go representation of a PolicyStatement json struct -// it defines what Actions that a Principle can or cannot perform on a Resource -type PolicyStatement struct { - // Sid (optional) is the PolicyStatement's unique identifier - Sid string `json:"Sid"` - // Effect determines whether the Action(s) are 'Allow'ed or 'Deny'ed. - Effect effect `json:"Effect"` - // Principle is/are the Ceph user names affected by this PolicyStatement - // Must be in the format of 'arn:aws:iam:::user/' - Principal map[string][]string `json:"Principal"` - // Action is a list of s3:* actions - Action []action `json:"Action"` - // Resource is the ARN identifier for the S3 resource (bucket) - // Must be in the format of 'arn:aws:s3:::' - Resource []string `json:"Resource"` -} - -// BucketPolicy represents set of policy statements for a single bucket. -type BucketPolicy struct { - // Id (optional) identifies the bucket policy - Id string `json:"Id"` - // Version is the version of the BucketPolicy data structure - // should always be '2012-10-17' - Version string `json:"Version"` - Statement []PolicyStatement `json:"Statement"` -} - -// the version of the BucketPolicy json structure -const version = "2012-10-17" - -// NewBucketPolicy obviously returns a new BucketPolicy. PolicyStatements may be passed in at creation -// or added after the fact. BucketPolicies should be passed to PutBucketPolicy(). 
-func NewBucketPolicy(ps ...PolicyStatement) *BucketPolicy { - bp := &BucketPolicy{ - Version: version, - Statement: append([]PolicyStatement{}, ps...), - } - return bp -} - -// PutBucketPolicy applies the policy to the bucket -func (s *S3Agent) PutBucketPolicy(bucket string, policy BucketPolicy) (*s3.PutBucketPolicyOutput, error) { - - confirmRemoveSelfBucketAccess := false - serializedPolicy, _ := json.Marshal(policy) - consumablePolicy := string(serializedPolicy) - - p := &s3.PutBucketPolicyInput{ - Bucket: &bucket, - ConfirmRemoveSelfBucketAccess: &confirmRemoveSelfBucketAccess, - Policy: &consumablePolicy, - } - out, err := s.Client.PutBucketPolicy(p) - if err != nil { - return out, err - } - return out, nil -} - -func (s *S3Agent) GetBucketPolicy(bucket string) (*BucketPolicy, error) { - out, err := s.Client.GetBucketPolicy(&s3.GetBucketPolicyInput{ - Bucket: &bucket, - }) - if err != nil { - return nil, err - } - - policy := &BucketPolicy{} - err = json.Unmarshal([]byte(*out.Policy), policy) - if err != nil { - return nil, err - } - return policy, nil -} - -// ModifyBucketPolicy new and old statement SIDs and overwrites on a match. -// This allows users to Get, modify, and Replace existing statements as well as -// add new ones. -func (bp *BucketPolicy) ModifyBucketPolicy(ps ...PolicyStatement) *BucketPolicy { - for _, newP := range ps { - var match bool - for j, oldP := range bp.Statement { - if newP.Sid == oldP.Sid { - bp.Statement[j] = newP - } - } - if !match { - bp.Statement = append(bp.Statement, newP) - } - } - return bp -} - -func (bp *BucketPolicy) DropPolicyStatements(sid ...string) *BucketPolicy { - for _, s := range sid { - for i, stmt := range bp.Statement { - if stmt.Sid == s { - bp.Statement = append(bp.Statement[:i], bp.Statement[i+1:]...) - break - } - } - } - return bp -} - -func (bp *BucketPolicy) EjectPrincipals(users ...string) *BucketPolicy { - statements := bp.Statement - for _, s := range statements { - s.EjectPrincipals(users...) - } - bp.Statement = statements - return bp -} - -// NewPolicyStatement generates a new PolicyStatement. PolicyStatment methods are designed to -// be chain called with dot notation to allow for easy configuration at creation. This is preferable -// to a long parameter list. 
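// --- Illustrative usage sketch, not part of the original patch ---
// The chained builder described above reads naturally at the call site. Assuming this
// snippet lives in the same package, with a hypothetical bucket "mybucket" and user
// "alice", a statement and policy could be assembled like this:
func exampleBucketPolicy() *BucketPolicy {
	ps := NewPolicyStatement().
		WithSID("AllowAliceReadWrite").
		ForPrincipals("alice").
		ForResources("mybucket").
		ForSubResources("mybucket").
		Allows().
		Actions(GetObject, PutObject, ListBucket)
	return NewBucketPolicy(*ps)
}
// The resulting *BucketPolicy would then be applied with (*S3Agent).PutBucketPolicy,
// e.g. s3Agent.PutBucketPolicy("mybucket", *exampleBucketPolicy()).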
-func NewPolicyStatement() *PolicyStatement { - return &PolicyStatement{ - Sid: "", - Effect: "", - Principal: map[string][]string{}, - Action: []action{}, - Resource: []string{}, - } -} - -func (ps *PolicyStatement) WithSID(sid string) *PolicyStatement { - ps.Sid = sid - return ps -} - -const awsPrinciple = "AWS" -const arnPrefixPrinciple = "arn:aws:iam:::user/%s" -const arnPrefixResource = "arn:aws:s3:::%s" - -// ForPrincipals adds users to the PolicyStatement -func (ps *PolicyStatement) ForPrincipals(users ...string) *PolicyStatement { - principals := ps.Principal[awsPrinciple] - for _, u := range users { - principals = append(principals, fmt.Sprintf(arnPrefixPrinciple, u)) - } - ps.Principal[awsPrinciple] = principals - return ps -} - -// ForResources adds resources (buckets) to the PolicyStatement with the appropriate ARN prefix -func (ps *PolicyStatement) ForResources(resources ...string) *PolicyStatement { - for _, v := range resources { - ps.Resource = append(ps.Resource, fmt.Sprintf(arnPrefixResource, v)) - } - return ps -} - -// ForSubResources add contents inside the bucket to the PolicyStatement with the appropriate ARN prefix -func (ps *PolicyStatement) ForSubResources(resources ...string) *PolicyStatement { - var subresource string - for _, v := range resources { - subresource = fmt.Sprintf("%s/*", v) - ps.Resource = append(ps.Resource, fmt.Sprintf(arnPrefixResource, subresource)) - } - return ps -} - -// Allows sets the effect of the PolicyStatement to allow PolicyStatement's Actions -func (ps *PolicyStatement) Allows() *PolicyStatement { - if ps.Effect != "" { - return ps - } - ps.Effect = effectAllow - return ps -} - -// Denies sets the effect of the PolicyStatement to deny the PolicyStatement's Actions -func (ps *PolicyStatement) Denies() *PolicyStatement { - if ps.Effect != "" { - return ps - } - ps.Effect = effectDeny - return ps -} - -// Actions is the set of "s3:*" actions for the PolicyStatement is concerned -func (ps *PolicyStatement) Actions(actions ...action) *PolicyStatement { - ps.Action = actions - return ps -} - -func (ps *PolicyStatement) EjectPrincipals(users ...string) { - principals := ps.Principal[awsPrinciple] - for _, u := range users { - for j, v := range principals { - if u == v { - principals = append(principals[:j], principals[:j+1]...) - } - } - } - ps.Principal[awsPrinciple] = principals -} - -// ////////////// -// End Policy -// ////////////// diff --git a/pkg/operator/ceph/object/realm/controller.go b/pkg/operator/ceph/object/realm/controller.go deleted file mode 100644 index 6cb5e6a22..000000000 --- a/pkg/operator/ceph/object/realm/controller.go +++ /dev/null @@ -1,342 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package objectrealm to manage a rook object realm. 
-package realm - -import ( - "context" - "encoding/base64" - "fmt" - "reflect" - "syscall" - "time" - - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - controllerName = "ceph-object-realm-controller" - accessKeyLength = 14 - secretKeyLength = 28 -) - -var waitForRequeueIfRealmNotReady = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -var cephObjectRealmKind = reflect.TypeOf(cephv1.CephObjectRealm{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephObjectRealmKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileObjectRealm reconciles a ObjectRealm object -type ReconcileObjectRealm struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo -} - -// Add creates a new CephObjectRealm Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - - return &ReconcileObjectRealm{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephObjectRealm CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephObjectRealm{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephObjectRealm object and makes changes based on the state read -// and what is in the CephObjectRealm.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileObjectRealm) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - logger.Errorf("failed to reconcile: %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileObjectRealm) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the CephObjectRealm instance - cephObjectRealm := &cephv1.CephObjectRealm{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephObjectRealm) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectRealm resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, errors.Wrap(err, "failed to get CephObjectRealm") - } - - // The CR was just created, initializing status fields - if cephObjectRealm.Status == nil { - updateStatus(r.client, request.NamespacedName, k8sutil.EmptyStatus) - } - - // Make sure a CephCluster is present otherwise do nothing - _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - // This handles the case where the Ceph Cluster is gone and we want to delete that CR - if !cephObjectRealm.GetDeletionTimestamp().IsZero() && !cephClusterExists { - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, nil - } - return reconcileResponse, nil - } - - // DELETE: the CR was deleted - if !cephObjectRealm.GetDeletionTimestamp().IsZero() { - logger.Debugf("deleting realm CR %q", cephObjectRealm.Name) - - // Return and do not requeue. Successful deletion. 
- return reconcile.Result{}, nil - } - - // Populate clusterInfo during each reconcile - r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info") - } - - // validate the realm settings - err = validateRealmCR(cephObjectRealm) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcileFailedStatus) - return reconcile.Result{}, errors.Wrapf(err, "invalid CephObjectRealm CR %q", cephObjectRealm.Name) - } - - // Start object reconciliation, updating status for this - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcilingStatus) - - // Create/Pull Ceph Realm - if cephObjectRealm.Spec.IsPullRealm() { - logger.Debug("pull section in spec found") - _, err = r.pullCephRealm(cephObjectRealm) - if err != nil { - return reconcile.Result{}, err - } - } else { - _, err = r.createRealmKeys(cephObjectRealm) - if err != nil { - return r.setFailedStatus(request.NamespacedName, "failed to create keys for realm", err) - } - - _, err = r.createCephRealm(cephObjectRealm) - if err != nil { - return r.setFailedStatus(request.NamespacedName, "failed to create ceph realm", err) - } - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, k8sutil.ReadyStatus) - - // Return and do not requeue - logger.Debug("realm done reconciling") - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectRealm) pullCephRealm(realm *cephv1.CephObjectRealm) (reconcile.Result, error) { - realmArg := fmt.Sprintf("--rgw-realm=%s", realm.Name) - urlArg := fmt.Sprintf("--url=%s", realm.Spec.Pull.Endpoint) - logger.Debug("getting keys to pull realm") - accessKeyArg, secretKeyArg, err := object.GetRealmKeyArgs(r.context, realm.Name, realm.Namespace) - if err != nil { - if kerrors.IsNotFound(err) { - return waitForRequeueIfRealmNotReady, err - } - return waitForRequeueIfRealmNotReady, errors.Wrap(err, "failed to get keys for realm") - } - logger.Debugf("keys found to pull realm, getting ready to pull from endpoint %q", realm.Spec.Pull.Endpoint) - - objContext := object.NewContext(r.context, r.clusterInfo, realm.Name) - output, err := object.RunAdminCommandNoMultisite(objContext, false, "realm", "pull", realmArg, urlArg, accessKeyArg, secretKeyArg) - - if err != nil { - return waitForRequeueIfRealmNotReady, errors.Wrapf(err, "realm pull failed for reason: %v", output) - } - logger.Debugf("realm pull for %q from endpoint %q succeeded", realm.Name, realm.Spec.Pull.Endpoint) - - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectRealm) createCephRealm(realm *cephv1.CephObjectRealm) (reconcile.Result, error) { - realmArg := fmt.Sprintf("--rgw-realm=%s", realm.Name) - objContext := object.NewContext(r.context, r.clusterInfo, realm.Namespace) - - _, err := object.RunAdminCommandNoMultisite(objContext, true, "realm", "get", realmArg) - - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - logger.Debugf("ceph realm %q not found, running `radosgw-admin realm create`", realm.Name) - _, err := object.RunAdminCommandNoMultisite(objContext, false, "realm", "create", realmArg) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to create ceph realm %s", realm.Name) - } - logger.Debugf("created ceph realm %q", realm.Name) - } else { - return reconcile.Result{}, errors.Wrapf(err, "radosgw-admin realm get failed with code %d", code) - } - } - - return reconcile.Result{}, 
nil -} - -func (r *ReconcileObjectRealm) createRealmKeys(realm *cephv1.CephObjectRealm) (reconcile.Result, error) { - ctx := context.TODO() - logger.Debugf("generating access and secret keys for new realm %q", realm.Name) - - // the realm's secret key and access key are randomly generated and then encoded to base64 - accessKey, err := mgr.GeneratePassword(accessKeyLength) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "access key failed to generate") - } - accessKey = base64.StdEncoding.EncodeToString([]byte(accessKey)) - - secretKey, err := mgr.GeneratePassword(secretKeyLength) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to generate secret key") - } - secretKey = base64.StdEncoding.EncodeToString([]byte(secretKey)) - - logger.Debugf("creating secrets for new realm %q", realm.Name) - - secrets := map[string][]byte{ - object.AccessKeyName: []byte(accessKey), - object.SecretKeyName: []byte(secretKey), - } - - secretName := realm.Name + "-keys" - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: realm.Namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - err = controllerutil.SetControllerReference(realm, secret, r.scheme) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to set owner reference of rgw secret %q", secret.Name) - } - - if _, err = r.context.Clientset.CoreV1().Secrets(realm.Namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to save rgw secrets") - } - logger.Infof("secrets for keys have been created for realm %q", realm.Name) - - return reconcile.Result{}, nil -} - -// validateRealmCR validates the realm arguments -func validateRealmCR(u *cephv1.CephObjectRealm) error { - if u.Name == "" { - return errors.New("missing name") - } - if u.Namespace == "" { - return errors.New("missing namespace") - } - return nil -} - -func (r *ReconcileObjectRealm) setFailedStatus(name types.NamespacedName, errMessage string, err error) (reconcile.Result, error) { - updateStatus(r.client, name, k8sutil.ReconcileFailedStatus) - return reconcile.Result{}, errors.Wrapf(err, "%s", errMessage) -} - -// updateStatus updates an realm with a given status -func updateStatus(client client.Client, name types.NamespacedName, status string) { - objectRealm := &cephv1.CephObjectRealm{} - if err := client.Get(context.TODO(), name, objectRealm); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectRealm resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve object realm %q to update status to %q. %v", name, status, err) - return - } - if objectRealm.Status == nil { - objectRealm.Status = &cephv1.Status{} - } - - objectRealm.Status.Phase = status - if err := reporting.UpdateStatus(client, objectRealm); err != nil { - logger.Errorf("failed to set object realm %q status to %q. %v", name, status, err) - return - } - logger.Debugf("object realm %q status updated to %q", name, status) -} diff --git a/pkg/operator/ceph/object/realm/controller_test.go b/pkg/operator/ceph/object/realm/controller_test.go deleted file mode 100644 index 057271642..000000000 --- a/pkg/operator/ceph/object/realm/controller_test.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package realm to manage a rook object realm. -package realm - -import ( - "context" - "testing" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/operator/test" - - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var ( - name = "realm-a" - namespace = "rook-ceph" - realmGetJSON = `{ - "id": "237e6250-5f7d-4b85-9359-8cb2b1848507", - "name": "realm-a", - "current_period": "df665ecb-1762-47a9-9c66-f938d251c02a", - "epoch": 2 - }` -) - -func TestCephObjectRealmController(t *testing.T) { - ctx := context.TODO() - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - // A Pool resource with metadata and spec. - r, objectRealm := getObjectRealmAndReconcileObjectRealm(t) - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - - object := []runtime.Object{ - objectRealm, - cephCluster, - } - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(r.scheme).WithRuntimeObjects(object...).Build() - // Create a ReconcileObjectRealm object with the scheme and fake client. - r = &ReconcileObjectRealm{client: cl, scheme: r.scheme, context: r.context} - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - - // - // TEST 3: - // - // SUCCESS! The CephCluster is ready and Object Realm is Created - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = r.context.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. 
- cl = fake.NewClientBuilder().WithScheme(r.scheme).WithRuntimeObjects(object...).Build() - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "realm" && args[1] == "get" { - return realmGetJSON, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "realm" && args[1] == "get" { - return realmGetJSON, nil - } - return "", nil - }, - } - - r.context.Executor = executor - - // Create a ReconcileObjectRealm object with the scheme and fake client. - r = &ReconcileObjectRealm{client: cl, scheme: r.scheme, context: r.context} - - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, objectRealm) - assert.NoError(t, err) -} - -func TestPullCephRealm(t *testing.T) { - ctx := context.TODO() - r, objectRealm := getObjectRealmAndReconcileObjectRealm(t) - - secrets := map[string][]byte{ - "access-key": []byte("akey"), - "secret-key": []byte("skey"), - } - - secretName := objectRealm.Name + "-keys" - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: objectRealm.Namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - - _, err := r.context.Clientset.CoreV1().Secrets(objectRealm.Namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - objectRealm.Spec.Pull.Endpoint = "http://10.2.1.164:80" - res, err := r.pullCephRealm(objectRealm) - assert.NoError(t, err) - assert.False(t, res.Requeue) -} - -func TestCreateRealmKeys(t *testing.T) { - r, objectRealm := getObjectRealmAndReconcileObjectRealm(t) - - res, err := r.createRealmKeys(objectRealm) - assert.NoError(t, err) - assert.False(t, res.Requeue) -} - -func TestCreateCephRealm(t *testing.T) { - r, objectRealm := getObjectRealmAndReconcileObjectRealm(t) - - res, err := r.createCephRealm(objectRealm) - assert.NoError(t, err) - assert.False(t, res.Requeue) -} - -func getObjectRealmAndReconcileObjectRealm(t *testing.T) (*ReconcileObjectRealm, *cephv1.CephObjectRealm) { - objectRealm := &cephv1.CephObjectRealm{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectRealm", - }, - Spec: cephv1.ObjectRealmSpec{}, - } - cephCluster := &cephv1.CephCluster{} - - // Objects to track in the fake client. 
- object := []runtime.Object{ - objectRealm, - cephCluster, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "realm" && args[1] == "get" { - return realmGetJSON, nil - } - return "", nil - }, - } - - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectRealm{}, &cephv1.CephCluster{}, &cephv1.CephClusterList{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileObjectRealm object with the scheme and fake client. - clusterInfo := cephclient.AdminClusterInfo("rook") - r := &ReconcileObjectRealm{client: cl, scheme: s, context: c, clusterInfo: clusterInfo} - - return r, objectRealm -} diff --git a/pkg/operator/ceph/object/rgw.go b/pkg/operator/ceph/object/rgw.go deleted file mode 100644 index 10adfa279..000000000 --- a/pkg/operator/ceph/object/rgw.go +++ /dev/null @@ -1,365 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package object for the Ceph object store. 
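Each deleted test builds its reconciler the same way: register the CRD types on a scheme, seed a fake controller-runtime client with the objects to track, and wrap both in the reconcile struct. The sketch below isolates that scaffolding, assuming controller-runtime v0.7+ for `NewClientBuilder`; a core `ConfigMap` stands in for the CephObjectRealm so the example needs no Rook types, and the object names are illustrative.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// A ConfigMap stands in for the CephObjectRealm; the deleted tests register
	// the Ceph CRDs on the scheme with AddKnownTypes before seeding the client.
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "realm-a", Namespace: "rook-ceph"},
		Data:       map[string]string{"phase": "Ready"},
	}

	cl := fake.NewClientBuilder().
		WithScheme(clientgoscheme.Scheme).
		WithRuntimeObjects(cm).
		Build()

	fetched := &corev1.ConfigMap{}
	if err := cl.Get(context.TODO(), types.NamespacedName{Namespace: "rook-ceph", Name: "realm-a"}, fetched); err != nil {
		panic(err)
	}
	fmt.Println(fetched.Data["phase"]) // prints "Ready"
}
```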
-package object - -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "syscall" - - "github.com/banzaicloud/k8s-objectmatcher/patch" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/pool" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type clusterConfig struct { - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - store *cephv1.CephObjectStore - rookVersion string - clusterSpec *cephv1.ClusterSpec - ownerInfo *k8sutil.OwnerInfo - DataPathMap *config.DataPathMap - client client.Client -} - -type rgwConfig struct { - ResourceName string - DaemonID string - Realm string - ZoneGroup string - Zone string -} - -var updateDeploymentAndWait = mon.UpdateCephDeploymentAndWait - -func (c *clusterConfig) createOrUpdateStore(realmName, zoneGroupName, zoneName string) error { - logger.Infof("creating object store %q in namespace %q", c.store.Name, c.store.Namespace) - - if err := c.startRGWPods(realmName, zoneGroupName, zoneName); err != nil { - return errors.Wrap(err, "failed to start rgw pods") - } - - objContext := NewContext(c.context, c.clusterInfo, c.store.Namespace) - err := enableRGWDashboard(objContext) - if err != nil { - logger.Warningf("failed to enable dashboard for rgw. %v", err) - } - - logger.Infof("created object store %q in namespace %q", c.store.Name, c.store.Namespace) - return nil -} - -func (c *clusterConfig) startRGWPods(realmName, zoneGroupName, zoneName string) error { - ctx := context.TODO() - // backward compatibility, triggered during updates - if c.store.Spec.Gateway.Instances < 1 { - // Set the minimum of at least one instance - logger.Warning("spec.gateway.instances must be set to at least 1") - c.store.Spec.Gateway.Instances = 1 - } - - // start a new deployment and scale up - desiredRgwInstances := int(c.store.Spec.Gateway.Instances) - // If running on Pacific we force a single deployment and later set the deployment replica to the "instances" value - if c.clusterInfo.CephVersion.IsAtLeastPacific() { - desiredRgwInstances = 1 - } - for i := 0; i < desiredRgwInstances; i++ { - var err error - - daemonLetterID := k8sutil.IndexToName(i) - // Each rgw is id'ed by - - daemonName := fmt.Sprintf("%s-%s", c.store.Name, daemonLetterID) - // resource name is rook-ceph-rgw-- - resourceName := fmt.Sprintf("%s-%s-%s", AppName, c.store.Name, daemonLetterID) - - rgwConfig := &rgwConfig{ - ResourceName: resourceName, - DaemonID: daemonName, - Realm: realmName, - ZoneGroup: zoneGroupName, - Zone: zoneName, - } - - // We set the owner reference of the Secret to the Object controller instead of the replicaset - // because we watch for that resource and reconcile if anything happens to it - _, err = c.generateKeyring(rgwConfig) - if err != nil { - return errors.Wrap(err, "failed to create rgw keyring") - } - - // Set the rgw config flags - // Previously we were checking if the deployment was present, if not we would set the config flags - // Which means that we would only set the flag on newly created CephObjectStore CR - // Unfortunately, on upgrade we would not set the flags 
which is not ideal for old clusters where we were no setting those flags - // The KV supports setting those flags even if the RGW is running - logger.Info("setting rgw config flags") - err = c.setDefaultFlagsMonConfigStore(rgwConfig.ResourceName) - if err != nil { - // Getting EPERM typically happens when the flag may not be modified at runtime - // This is fine to ignore - code, ok := exec.ExitStatus(err) - if ok && code != int(syscall.EPERM) { - return errors.Wrap(err, "failed to set default rgw config options") - } - } - - // Create deployment - deployment, err := c.createDeployment(rgwConfig) - if err != nil { - return nil - } - logger.Infof("object store %q deployment %q started", c.store.Name, deployment.Name) - - // Set owner ref to cephObjectStore object - err = c.ownerInfo.SetControllerReference(deployment) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference for rgw deployment %q", deployment.Name) - } - - // Set the deployment hash as an annotation - err = patch.DefaultAnnotator.SetLastAppliedAnnotation(deployment) - if err != nil { - return errors.Wrapf(err, "failed to set annotation for deployment %q", deployment.Name) - } - - _, createErr := c.context.Clientset.AppsV1().Deployments(c.store.Namespace).Create(ctx, deployment, metav1.CreateOptions{}) - if createErr != nil { - if !kerrors.IsAlreadyExists(createErr) { - return errors.Wrap(createErr, "failed to create rgw deployment") - } - logger.Infof("object store %q deployment %q already exists. updating if needed", c.store.Name, deployment.Name) - if err := updateDeploymentAndWait(c.context, c.clusterInfo, deployment, config.RgwType, daemonLetterID, c.clusterSpec.SkipUpgradeChecks, c.clusterSpec.ContinueUpgradeAfterChecksEvenIfNotHealthy); err != nil { - return errors.Wrapf(err, "failed to update object store %q deployment %q", c.store.Name, deployment.Name) - } - } - - // Generate the mime.types file after the rep. controller as well for the same reason as keyring - if err := c.generateMimeTypes(); err != nil { - return errors.Wrap(err, "failed to generate the rgw mime.types config") - } - } - - // scale down scenario - deps, err := k8sutil.GetDeployments(c.context.Clientset, c.store.Namespace, c.storeLabelSelector()) - if err != nil { - logger.Warningf("could not get deployments for object store %q (matching label selector %q). %v", c.store.Name, c.storeLabelSelector(), err) - } - - currentRgwInstances := int(len(deps.Items)) - if currentRgwInstances > desiredRgwInstances { - logger.Infof("found more rgw deployments %d than desired %d in object store %q, scaling down", currentRgwInstances, c.store.Spec.Gateway.Instances, c.store.Name) - diffCount := currentRgwInstances - desiredRgwInstances - for i := 0; i < diffCount; { - depIDToRemove := currentRgwInstances - 1 - depNameToRemove := fmt.Sprintf("%s-%s-%s", AppName, c.store.Name, k8sutil.IndexToName(depIDToRemove)) - if err := k8sutil.DeleteDeployment(c.context.Clientset, c.store.Namespace, depNameToRemove); err != nil { - logger.Warningf("error during deletion of deployment %q resource. %v", depNameToRemove, err) - } - currentRgwInstances = currentRgwInstances - 1 - i++ - - // Delete the Secret key - secretToRemove := c.generateSecretName(k8sutil.IndexToName(depIDToRemove)) - err = c.context.Clientset.CoreV1().Secrets(c.store.Namespace).Delete(ctx, secretToRemove, metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - logger.Warningf("failed to delete rgw secret %q. 
%v", secretToRemove, err) - } - - err := c.deleteRgwCephObjects(depNameToRemove) - if err != nil { - logger.Warningf("%v", err) - } - } - // verify scale down was successful - deps, err = k8sutil.GetDeployments(c.context.Clientset, c.store.Namespace, c.storeLabelSelector()) - if err != nil { - logger.Warningf("could not get deployments for object store %q (matching label selector %q). %v", c.store.Name, c.storeLabelSelector(), err) - } - currentRgwInstances = len(deps.Items) - if currentRgwInstances == desiredRgwInstances { - logger.Infof("successfully scaled down rgw deployments to %d in object store %q", desiredRgwInstances, c.store.Name) - } - } - - return nil -} - -// Delete the object store. -// WARNING: This is a very destructive action that deletes all metadata and data pools. -func (c *clusterConfig) deleteStore() { - logger.Infof("deleting object store %q from namespace %q", c.store.Name, c.store.Namespace) - - if !c.clusterSpec.External.Enable { - // Delete rgw CephX keys and configuration in centralized mon database - for i := 0; i < int(c.store.Spec.Gateway.Instances); i++ { - daemonLetterID := k8sutil.IndexToName(i) - depNameToRemove := fmt.Sprintf("%s-%s-%s", AppName, c.store.Name, daemonLetterID) - - err := c.deleteRgwCephObjects(depNameToRemove) - if err != nil { - logger.Errorf("failed to delete rgw CephX keys and configuration. Error: %v", err) - } - } - - // Delete the realm and pools - objContext, err := NewMultisiteContext(c.context, c.clusterInfo, c.store) - if err != nil { - logger.Errorf("failed to set multisite on object store %q. Error: %v", c.store.Name, err) - } - - objContext.Endpoint = c.store.Status.Info["endpoint"] - - go disableRGWDashboard(objContext) - - err = deleteRealmAndPools(objContext, c.store.Spec) - if err != nil { - logger.Errorf("failed to delete the realm and pools. 
Error: %v", err) - } - } - - logger.Infof("done deleting object store %q from namespace %q", c.store.Name, c.store.Namespace) -} - -func (c *clusterConfig) deleteRgwCephObjects(depNameToRemove string) error { - logger.Infof("deleting rgw CephX key and configuration in centralized mon database for %q", depNameToRemove) - - // Delete configuration in centralized mon database - err := c.deleteFlagsMonConfigStore(depNameToRemove) - if err != nil { - return err - } - - err = cephclient.AuthDelete(c.context, c.clusterInfo, generateCephXUser(depNameToRemove)) - if err != nil { - return err - } - - logger.Infof("completed deleting rgw CephX key and configuration in centralized mon database for %q", depNameToRemove) - return nil -} - -func instanceName(name string) string { - return fmt.Sprintf("%s-%s", AppName, name) -} - -func (c *clusterConfig) storeLabelSelector() string { - return fmt.Sprintf("rook_object_store=%s", c.store.Name) -} - -// Validate the object store arguments -func (r *ReconcileCephObjectStore) validateStore(s *cephv1.CephObjectStore) error { - if err := cephv1.ValidateObjectSpec(s); err != nil { - return err - } - - // Validate the pool settings, but allow for empty pools specs in case they have already been created - // such as by the ceph mgr - if !emptyPool(s.Spec.MetadataPool) { - if err := pool.ValidatePoolSpec(r.context, r.clusterInfo, r.clusterSpec, &s.Spec.MetadataPool); err != nil { - return errors.Wrap(err, "invalid metadata pool spec") - } - } - if !emptyPool(s.Spec.DataPool) { - if err := pool.ValidatePoolSpec(r.context, r.clusterInfo, r.clusterSpec, &s.Spec.DataPool); err != nil { - return errors.Wrap(err, "invalid data pool spec") - } - } - - return nil -} - -func (c *clusterConfig) generateSecretName(id string) string { - return fmt.Sprintf("%s-%s-%s-keyring", AppName, c.store.Name, id) -} - -func emptyPool(pool cephv1.PoolSpec) bool { - return reflect.DeepEqual(pool, cephv1.PoolSpec{}) -} - -// BuildDomainName build the dns name to reach out the service endpoint -func BuildDomainName(name, namespace string) string { - return fmt.Sprintf("%s-%s.%s.%s", AppName, name, namespace, svcDNSSuffix) -} - -// BuildDNSEndpoint build the dns name to reach out the service endpoint -func BuildDNSEndpoint(domainName string, port int32, secure bool) string { - httpPrefix := "http" - if secure { - httpPrefix = "https" - } - return fmt.Sprintf("%s://%s:%d", httpPrefix, domainName, port) -} - -// GetTLSCACert fetch cacert for internal RGW requests -func GetTlsCaCert(objContext *Context, objectStoreSpec *cephv1.ObjectStoreSpec) ([]byte, error) { - ctx := context.TODO() - var ( - tlsCert []byte - err error - ) - - if objectStoreSpec.Gateway.SSLCertificateRef != "" { - tlsSecretCert, err := objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Get(ctx, objectStoreSpec.Gateway.SSLCertificateRef, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "failed to get secret %s containing TLS certificate defined in %s", objectStoreSpec.Gateway.SSLCertificateRef, objContext.Name) - } - if tlsSecretCert.Type == v1.SecretTypeOpaque { - tlsCert = tlsSecretCert.Data[certKeyName] - } else if tlsSecretCert.Type == v1.SecretTypeTLS { - tlsCert = tlsSecretCert.Data[v1.TLSCertKey] - } - } else if objectStoreSpec.GetServiceServingCert() != "" { - tlsCert, err = ioutil.ReadFile(ServiceServingCertCAFile) - if err != nil { - return nil, errors.Wrapf(err, "failed to fetch TLS certificate from %q", ServiceServingCertCAFile) - } - } - - return tlsCert, nil -} - 
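`GetTlsCaCert` above fetches the PEM bundle either from the `SSLCertificateRef` Secret or from the service-serving CA file, and `GenObjectStoreHTTPClient` together with `BuildTransportTLS` (both later in this diff) turn those bytes into an HTTP client that trusts only that CA. A minimal standard-library sketch of that last step, assuming the PEM bytes are already in hand, is below; the timeout value is illustrative rather than the operator's `HttpTimeOut`.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"time"
)

// newHTTPClientWithCA mirrors the BuildTransportTLS/GenObjectStoreHTTPClient
// pattern: trust only the supplied CA bundle and require TLS 1.2 or newer.
func newHTTPClientWithCA(caPEM []byte) (*http.Client, error) {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no usable certificates in CA bundle")
	}
	return &http.Client{
		Timeout: 15 * time.Second, // illustrative timeout
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool, MinVersion: tls.VersionTLS12},
		},
	}, nil
}

func main() {
	// caPEM would normally come from the rgw TLS Secret or the service-serving CA file.
	_, err := newHTTPClientWithCA([]byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"))
	fmt.Println(err) // errors here because the PEM above is only a placeholder
}
```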
-func GenObjectStoreHTTPClient(objContext *Context, spec *cephv1.ObjectStoreSpec) (*http.Client, []byte, error) { - nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name) - c := &http.Client{} - tlsCert := []byte{} - if spec.IsTLSEnabled() { - var err error - tlsCert, err = GetTlsCaCert(objContext, spec) - if err != nil { - return nil, tlsCert, errors.Wrapf(err, "failed to fetch CA cert to establish TLS connection with object store %q", nsName) - } - c.Transport = BuildTransportTLS(tlsCert) - } - return c, tlsCert, nil -} diff --git a/pkg/operator/ceph/object/rgw_test.go b/pkg/operator/ceph/object/rgw_test.go deleted file mode 100644 index 0e0f45f75..000000000 --- a/pkg/operator/ceph/object/rgw_test.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - "io/ioutil" - "os" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/rook/rook/pkg/clusterd" - - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - fclient "k8s.io/client-go/kubernetes/fake" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestStartRGW(t *testing.T) { - ctx := context.TODO() - clientset := testop.New(t, 3) - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "auth" && args[1] == "get-or-create-key" { - return `{"key":"mysecurekey"}`, nil - } - return `{"id":"test-id"}`, nil - }, - } - - configDir, _ := ioutil.TempDir("", "") - defer os.RemoveAll(configDir) - info := clienttest.CreateTestClusterInfo(1) - context := &clusterd.Context{Clientset: clientset, Executor: executor, ConfigDir: configDir} - store := simpleStore() - store.Spec.Gateway.Instances = 1 - version := "v1.1.0" - data := config.NewStatelessDaemonDataPathMap(config.RgwType, "my-fs", "rook-ceph", "/var/lib/rook/") - - s := scheme.Scheme - object := []runtime.Object{&cephv1.CephObjectStore{}} - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - r := &ReconcileCephObjectStore{client: cl, scheme: s} - - // start a basic cluster - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := &clusterConfig{context, info, store, version, &cephv1.ClusterSpec{}, ownerInfo, data, r.client} - err := c.startRGWPods(store.Name, store.Name, store.Name) - assert.Nil(t, err) - - validateStart(ctx, t, c, clientset) -} - -func validateStart(ctx context.Context, t *testing.T, c *clusterConfig, clientset *fclient.Clientset) { - 
rgwName := instanceName(c.store.Name) + "-a" - r, err := clientset.AppsV1().Deployments(c.store.Namespace).Get(ctx, rgwName, metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, rgwName, r.Name) -} - -func TestCreateObjectStore(t *testing.T) { - commandWithOutputFunc := func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if command == "ceph" { - if args[1] == "erasure-code-profile" { - return `{"k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return `{"key":"mykey"}`, nil - } - } else { - return `{"realms": []}`, nil - } - return "", nil - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithCombinedOutput: commandWithOutputFunc, - MockExecuteCommandWithOutput: commandWithOutputFunc, - } - - store := simpleStore() - clientset := testop.New(t, 3) - context := &clusterd.Context{Executor: executor, Clientset: clientset} - info := clienttest.CreateTestClusterInfo(1) - data := config.NewStatelessDaemonDataPathMap(config.RgwType, "my-fs", "rook-ceph", "/var/lib/rook/") - - // create the pools - s := scheme.Scheme - object := []runtime.Object{&cephv1.CephObjectStore{}} - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - r := &ReconcileCephObjectStore{client: cl, scheme: s} - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - c := &clusterConfig{context, info, store, "1.2.3.4", &cephv1.ClusterSpec{}, ownerInfo, data, r.client} - err := c.createOrUpdateStore(store.Name, store.Name, store.Name) - assert.Nil(t, err) -} - -func simpleStore() *cephv1.CephObjectStore { - return &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "mycluster"}, - Spec: cephv1.ObjectStoreSpec{ - MetadataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}, - DataPool: cephv1.PoolSpec{ErasureCoded: cephv1.ErasureCodedSpec{CodingChunks: 1, DataChunks: 2}}, - Gateway: cephv1.GatewaySpec{Port: 123}, - }, - } -} - -func TestGenerateSecretName(t *testing.T) { - cl := fake.NewClientBuilder().Build() - - // start a basic cluster - c := &clusterConfig{&clusterd.Context{}, - &cephclient.ClusterInfo{}, - &cephv1.CephObjectStore{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "mycluster"}}, - "v1.1.0", - &cephv1.ClusterSpec{}, - &k8sutil.OwnerInfo{}, - &config.DataPathMap{}, - cl} - secret := c.generateSecretName("a") - assert.Equal(t, "rook-ceph-rgw-default-a-keyring", secret) -} - -func TestEmptyPoolSpec(t *testing.T) { - assert.True(t, emptyPool(cephv1.PoolSpec{})) - - p := cephv1.PoolSpec{FailureDomain: "foo"} - assert.False(t, emptyPool(p)) - - p = cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1}} - assert.False(t, emptyPool(p)) - - p = cephv1.PoolSpec{ErasureCoded: cephv1.ErasureCodedSpec{CodingChunks: 1}} - assert.False(t, emptyPool(p)) -} - -func TestBuildDomainNameAndEndpoint(t *testing.T) { - name := "my-store" - ns := "rook-ceph" - dns := BuildDomainName(name, ns) - assert.Equal(t, "rook-ceph-rgw-my-store.rook-ceph.svc", dns) - - // non-secure endpoint - var port int32 = 80 - ep := BuildDNSEndpoint(dns, port, false) - assert.Equal(t, "http://rook-ceph-rgw-my-store.rook-ceph.svc:80", ep) - - // Secure endpoint - var securePort int32 = 443 - ep = BuildDNSEndpoint(dns, securePort, true) - assert.Equal(t, "https://rook-ceph-rgw-my-store.rook-ceph.svc:443", ep) -} diff --git a/pkg/operator/ceph/object/s3-handlers.go 
b/pkg/operator/ceph/object/s3-handlers.go deleted file mode 100644 index 98701f734..000000000 --- a/pkg/operator/ceph/object/s3-handlers.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "bytes" - "crypto/tls" - "crypto/x509" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/pkg/errors" -) - -// S3Agent wraps the s3.S3 structure to allow for wrapper methods -type S3Agent struct { - Client *s3.S3 -} - -func NewS3Agent(accessKey, secretKey, endpoint string, debug bool, tlsCert []byte) (*S3Agent, error) { - return newS3Agent(accessKey, secretKey, endpoint, debug, tlsCert, false) -} - -func NewTestOnlyS3Agent(accessKey, secretKey, endpoint string, debug bool) (*S3Agent, error) { - return newS3Agent(accessKey, secretKey, endpoint, debug, nil, true) -} - -func newS3Agent(accessKey, secretKey, endpoint string, debug bool, tlsCert []byte, insecure bool) (*S3Agent, error) { - const cephRegion = "us-east-1" - - logLevel := aws.LogOff - if debug { - logLevel = aws.LogDebug - } - client := http.Client{ - Timeout: HttpTimeOut, - } - tlsEnabled := false - if len(tlsCert) > 0 || insecure { - tlsEnabled = true - if len(tlsCert) > 0 { - client.Transport = BuildTransportTLS(tlsCert) - } else if insecure { - client.Transport = &http.Transport{ - // #nosec G402 is enabled only for testing - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - } - } - sess, err := session.NewSession( - aws.NewConfig(). - WithRegion(cephRegion). - WithCredentials(credentials.NewStaticCredentials(accessKey, secretKey, "")). - WithEndpoint(endpoint). - WithS3ForcePathStyle(true). - WithMaxRetries(5). - WithDisableSSL(!tlsEnabled). - WithHTTPClient(&client). 
- WithLogLevel(logLevel), - ) - if err != nil { - return nil, err - } - svc := s3.New(sess) - return &S3Agent{ - Client: svc, - }, nil -} - -// CreateBucket creates a bucket with the given name -func (s *S3Agent) CreateBucketNoInfoLogging(name string) error { - return s.createBucket(name, false) -} - -// CreateBucket creates a bucket with the given name -func (s *S3Agent) CreateBucket(name string) error { - return s.createBucket(name, true) -} - -func (s *S3Agent) createBucket(name string, infoLogging bool) error { - if infoLogging { - logger.Infof("creating bucket %q", name) - } else { - logger.Debugf("creating bucket %q", name) - } - bucketInput := &s3.CreateBucketInput{ - Bucket: &name, - } - _, err := s.Client.CreateBucket(bucketInput) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - logger.Debugf("DEBUG: after s3 call, ok=%v, aerr=%v", ok, aerr) - switch aerr.Code() { - case s3.ErrCodeBucketAlreadyExists: - logger.Debugf("bucket %q already exists", name) - return nil - case s3.ErrCodeBucketAlreadyOwnedByYou: - logger.Debugf("bucket %q already owned by you", name) - return nil - } - } - return errors.Wrapf(err, "failed to create bucket %q", name) - } - - if infoLogging { - logger.Infof("successfully created bucket %q", name) - } else { - logger.Debugf("successfully created bucket %q", name) - } - return nil -} - -// DeleteBucket function deletes given bucket using s3 client -func (s *S3Agent) DeleteBucket(name string) (bool, error) { - _, err := s.Client.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(name), - }) - if err != nil { - logger.Errorf("failed to delete bucket. %v", err) - return false, err - - } - return true, nil -} - -// PutObjectInBucket function puts an object in a bucket using s3 client -func (s *S3Agent) PutObjectInBucket(bucketname string, body string, key string, - contentType string) (bool, error) { - _, err := s.Client.PutObject(&s3.PutObjectInput{ - Body: strings.NewReader(body), - Bucket: &bucketname, - Key: &key, - ContentType: &contentType, - }) - if err != nil { - logger.Errorf("failed to put object in bucket. %v", err) - return false, err - - } - return true, nil -} - -// GetObjectInBucket function retrieves an object from a bucket using s3 client -func (s *S3Agent) GetObjectInBucket(bucketname string, key string) (string, error) { - result, err := s.Client.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(bucketname), - Key: aws.String(key), - }) - - if err != nil { - logger.Errorf("failed to retrieve object from bucket. %v", err) - return "ERROR_ OBJECT NOT FOUND", err - - } - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(result.Body) - if err != nil { - return "", err - } - - return buf.String(), nil -} - -// DeleteObjectInBucket function deletes given bucket using s3 client -func (s *S3Agent) DeleteObjectInBucket(bucketname string, key string) (bool, error) { - _, err := s.Client.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucketname), - Key: aws.String(key), - }) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case s3.ErrCodeNoSuchBucket: - return true, nil - case s3.ErrCodeNoSuchKey: - return true, nil - } - } - logger.Errorf("failed to delete object from bucket. 
%v", err) - return false, err - - } - return true, nil -} - -func BuildTransportTLS(tlsCert []byte) *http.Transport { - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(tlsCert) - - return &http.Transport{ - TLSClientConfig: &tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12}, - } -} diff --git a/pkg/operator/ceph/object/spec.go b/pkg/operator/ceph/object/spec.go deleted file mode 100644 index 1aef1d5d2..000000000 --- a/pkg/operator/ceph/object/spec.go +++ /dev/null @@ -1,614 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - "fmt" - "path" - "reflect" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/libopenstorage/secrets/vault" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/daemon/ceph/osd/kms" - cephconfig "github.com/rook/rook/pkg/operator/ceph/config" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - livenessProbePath = "/swift/healthcheck" - // #nosec G101 since this is not leaking any hardcoded details - setupVaultTokenFile = ` -set -e - -VAULT_TOKEN_OLD_PATH=%s -VAULT_TOKEN_NEW_PATH=%s - -cp --verbose $VAULT_TOKEN_OLD_PATH $VAULT_TOKEN_NEW_PATH - -chmod --verbose 400 $VAULT_TOKEN_NEW_PATH - -chown --verbose ceph:ceph $VAULT_TOKEN_NEW_PATH -` -) - -func (c *clusterConfig) createDeployment(rgwConfig *rgwConfig) (*apps.Deployment, error) { - pod, err := c.makeRGWPodSpec(rgwConfig) - if err != nil { - return nil, err - } - replicas := int32(1) - // On Pacific, we can use the same keyring and have dedicated rgw instances reflected in the service map - if c.clusterInfo.CephVersion.IsAtLeastPacific() { - replicas = c.store.Spec.Gateway.Instances - } - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: rgwConfig.ResourceName, - Namespace: c.store.Namespace, - Labels: getLabels(c.store.Name, c.store.Namespace, true), - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: getLabels(c.store.Name, c.store.Namespace, false), - }, - Template: pod, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - k8sutil.AddRookVersionLabelToDeployment(d) - c.store.Spec.Gateway.Annotations.ApplyToObjectMeta(&d.ObjectMeta) - c.store.Spec.Gateway.Labels.ApplyToObjectMeta(&d.ObjectMeta) - controller.AddCephVersionLabelToDeployment(c.clusterInfo.CephVersion, d) - - return d, nil -} - -func (c *clusterConfig) makeRGWPodSpec(rgwConfig *rgwConfig) (v1.PodTemplateSpec, error) { - rgwDaemonContainer := c.makeDaemonContainer(rgwConfig) - if reflect.DeepEqual(rgwDaemonContainer, v1.Container{}) { - return v1.PodTemplateSpec{}, errors.New("got empty container for RGW daemon") - } - podSpec := v1.PodSpec{ - 
InitContainers: []v1.Container{ - c.makeChownInitContainer(rgwConfig), - }, - Containers: []v1.Container{rgwDaemonContainer}, - RestartPolicy: v1.RestartPolicyAlways, - Volumes: append( - controller.DaemonVolumes(c.DataPathMap, rgwConfig.ResourceName), - c.mimeTypesVolume(), - ), - HostNetwork: c.clusterSpec.Network.IsHost(), - PriorityClassName: c.store.Spec.Gateway.PriorityClassName, - } - - // If the log collector is enabled we add the side-car container - if c.clusterSpec.LogCollector.Enabled { - shareProcessNamespace := true - podSpec.ShareProcessNamespace = &shareProcessNamespace - podSpec.Containers = append(podSpec.Containers, *controller.LogCollectorContainer(strings.TrimPrefix(generateCephXUser(fmt.Sprintf("ceph-client.%s", rgwConfig.ResourceName)), "client."), c.clusterInfo.Namespace, *c.clusterSpec)) - } - - // Replace default unreachable node toleration - k8sutil.AddUnreachableNodeToleration(&podSpec) - - // Set the ssl cert if specified - if c.store.Spec.Gateway.SecurePort != 0 { - secretVolSrc, err := c.generateVolumeSourceWithTLSSecret() - if err != nil { - return v1.PodTemplateSpec{}, err - } - certVol := v1.Volume{ - Name: certVolumeName, - VolumeSource: v1.VolumeSource{ - Secret: secretVolSrc, - }} - podSpec.Volumes = append(podSpec.Volumes, certVol) - } - // Check custom caBundle provided - if c.store.Spec.Gateway.CaBundleRef != "" { - customCaBundleVolSrc, err := c.generateVolumeSourceWithCaBundleSecret() - if err != nil { - return v1.PodTemplateSpec{}, err - } - customCaBundleVol := v1.Volume{ - Name: caBundleVolumeName, - VolumeSource: v1.VolumeSource{ - Secret: customCaBundleVolSrc, - }} - podSpec.Volumes = append(podSpec.Volumes, customCaBundleVol) - updatedCaBundleVol := v1.Volume{ - Name: caBundleUpdatedVolumeName, - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }} - podSpec.Volumes = append(podSpec.Volumes, updatedCaBundleVol) - podSpec.InitContainers = append(podSpec.InitContainers, - c.createCaBundleUpdateInitContainer(rgwConfig)) - } - kmsEnabled, err := c.CheckRGWKMS() - if err != nil { - return v1.PodTemplateSpec{}, err - } - if kmsEnabled { - if c.store.Spec.Security.KeyManagementService.IsTokenAuthEnabled() { - podSpec.Volumes = append(podSpec.Volumes, - kms.VaultTokenFileVolume(c.store.Spec.Security.KeyManagementService.TokenSecretName)) - podSpec.InitContainers = append(podSpec.InitContainers, - c.vaultTokenInitContainer(rgwConfig)) - } - } - c.store.Spec.Gateway.Placement.ApplyToPodSpec(&podSpec) - - // If host networking is not enabled, preferred pod anti-affinity is added to the rgw daemons - labels := getLabels(c.store.Name, c.store.Namespace, false) - k8sutil.SetNodeAntiAffinityForPod(&podSpec, c.clusterSpec.Network.IsHost(), v1.LabelHostname, labels, nil) - - podTemplateSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: rgwConfig.ResourceName, - Labels: getLabels(c.store.Name, c.store.Namespace, true), - }, - Spec: podSpec, - } - c.store.Spec.Gateway.Annotations.ApplyToObjectMeta(&podTemplateSpec.ObjectMeta) - c.store.Spec.Gateway.Labels.ApplyToObjectMeta(&podTemplateSpec.ObjectMeta) - - if c.clusterSpec.Network.IsHost() { - podTemplateSpec.Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet - } else if c.clusterSpec.Network.IsMultus() { - if err := k8sutil.ApplyMultus(c.clusterSpec.Network, &podTemplateSpec.ObjectMeta); err != nil { - return podTemplateSpec, err - } - } - - return podTemplateSpec, nil -} - -func (c *clusterConfig) createCaBundleUpdateInitContainer(rgwConfig *rgwConfig) v1.Container { - 
caBundleMount := v1.VolumeMount{Name: caBundleVolumeName, MountPath: caBundleSourceCustomDir, ReadOnly: true} - volumeMounts := append(controller.DaemonVolumeMounts(c.DataPathMap, rgwConfig.ResourceName), caBundleMount) - updatedCaBundleDir := "/tmp/new-ca-bundle/" - updatedBundleMount := v1.VolumeMount{Name: caBundleUpdatedVolumeName, MountPath: updatedCaBundleDir, ReadOnly: false} - volumeMounts = append(volumeMounts, updatedBundleMount) - return v1.Container{ - Name: "update-ca-bundle-initcontainer", - Command: []string{"/bin/bash", "-c"}, - // copy all content of caBundleExtractedDir to avoid directory mount itself - Args: []string{ - fmt.Sprintf("/usr/bin/update-ca-trust extract; cp -rf %s/* %s", caBundleExtractedDir, updatedCaBundleDir), - }, - Image: c.clusterSpec.CephVersion.Image, - VolumeMounts: volumeMounts, - Resources: c.store.Spec.Gateway.Resources, - SecurityContext: controller.PodSecurityContext(), - } -} - -// The vault token is passed as Secret for rgw container. So it is mounted as read only. -// RGW has restrictions over vault token file, it should owned by same user(ceph) which -// rgw daemon runs and all other permission should be nil or zero. Here ownership can be -// changed with help of FSGroup but in openshift environments for security reasons it has -// predefined value, so it won't work there. Hence the token file is copied to containerDataDir -// from mounted secret then ownership/permissions are changed accordingly with help of a -// init container. -func (c *clusterConfig) vaultTokenInitContainer(rgwConfig *rgwConfig) v1.Container { - _, volMount := kms.VaultVolumeAndMount(c.store.Spec.Security.KeyManagementService.ConnectionDetails) - return v1.Container{ - Name: "vault-initcontainer-token-file-setup", - Command: []string{ - "/bin/bash", - "-c", - fmt.Sprintf(setupVaultTokenFile, - path.Join(kms.EtcVaultDir, kms.VaultFileName), path.Join(c.DataPathMap.ContainerDataDir, kms.VaultFileName)), - }, - Image: c.clusterSpec.CephVersion.Image, - VolumeMounts: append( - controller.DaemonVolumeMounts(c.DataPathMap, rgwConfig.ResourceName), volMount), - Resources: c.store.Spec.Gateway.Resources, - SecurityContext: controller.PodSecurityContext(), - } -} - -func (c *clusterConfig) makeChownInitContainer(rgwConfig *rgwConfig) v1.Container { - return controller.ChownCephDataDirsInitContainer( - *c.DataPathMap, - c.clusterSpec.CephVersion.Image, - controller.DaemonVolumeMounts(c.DataPathMap, rgwConfig.ResourceName), - c.store.Spec.Gateway.Resources, - controller.PodSecurityContext(), - ) -} - -func (c *clusterConfig) makeDaemonContainer(rgwConfig *rgwConfig) v1.Container { - // start the rgw daemon in the foreground - container := v1.Container{ - Name: "rgw", - Image: c.clusterSpec.CephVersion.Image, - Command: []string{ - "radosgw", - }, - Args: append( - controller.DaemonFlags(c.clusterInfo, c.clusterSpec, - strings.TrimPrefix(generateCephXUser(rgwConfig.ResourceName), "client.")), - "--foreground", - cephconfig.NewFlag("rgw frontends", fmt.Sprintf("%s %s", rgwFrontendName, c.portString())), - cephconfig.NewFlag("host", controller.ContainerEnvVarReference(k8sutil.PodNameEnvVar)), - cephconfig.NewFlag("rgw-mime-types-file", mimeTypesMountPath()), - cephconfig.NewFlag("rgw realm", rgwConfig.Realm), - cephconfig.NewFlag("rgw zonegroup", rgwConfig.ZoneGroup), - cephconfig.NewFlag("rgw zone", rgwConfig.Zone), - ), - VolumeMounts: append( - controller.DaemonVolumeMounts(c.DataPathMap, rgwConfig.ResourceName), - c.mimeTypesVolumeMount(), - ), - Env: 
controller.DaemonEnvVars(c.clusterSpec.CephVersion.Image), - Resources: c.store.Spec.Gateway.Resources, - LivenessProbe: c.generateLiveProbe(), - SecurityContext: controller.PodSecurityContext(), - WorkingDir: cephconfig.VarLogCephDir, - } - - // If the liveness probe is enabled - configureLivenessProbe(&container, c.store.Spec.HealthCheck) - if c.store.Spec.IsTLSEnabled() { - // Add a volume mount for the ssl certificate - mount := v1.VolumeMount{Name: certVolumeName, MountPath: certDir, ReadOnly: true} - container.VolumeMounts = append(container.VolumeMounts, mount) - } - if c.store.Spec.Gateway.CaBundleRef != "" { - updatedBundleMount := v1.VolumeMount{Name: caBundleUpdatedVolumeName, MountPath: caBundleExtractedDir, ReadOnly: true} - container.VolumeMounts = append(container.VolumeMounts, updatedBundleMount) - } - kmsEnabled, err := c.CheckRGWKMS() - if err != nil { - logger.Errorf("failed to enable KMS. %v", err) - return v1.Container{} - } - if kmsEnabled { - container.Args = append(container.Args, - cephconfig.NewFlag("rgw crypt s3 kms backend", - c.store.Spec.Security.KeyManagementService.ConnectionDetails[kms.Provider]), - cephconfig.NewFlag("rgw crypt vault addr", - c.store.Spec.Security.KeyManagementService.ConnectionDetails[api.EnvVaultAddress]), - ) - if c.store.Spec.Security.KeyManagementService.IsTokenAuthEnabled() { - container.Args = append(container.Args, - cephconfig.NewFlag("rgw crypt vault auth", kms.KMSTokenSecretNameKey), - cephconfig.NewFlag("rgw crypt vault token file", - path.Join(c.DataPathMap.ContainerDataDir, kms.VaultFileName)), - cephconfig.NewFlag("rgw crypt vault prefix", c.vaultPrefixRGW()), - cephconfig.NewFlag("rgw crypt vault secret engine", - c.store.Spec.Security.KeyManagementService.ConnectionDetails[kms.VaultSecretEngineKey]), - ) - } - } - return container -} - -// configureLivenessProbe returns the desired liveness probe for a given daemon -func configureLivenessProbe(container *v1.Container, healthCheck cephv1.BucketHealthCheckSpec) { - if ok := healthCheck.LivenessProbe; ok != nil { - if !healthCheck.LivenessProbe.Disabled { - probe := healthCheck.LivenessProbe.Probe - // If the spec value is empty, let's use a default - if probe != nil { - // Set the liveness probe on the container to overwrite the default probe created by Rook - container.LivenessProbe = cephconfig.GetLivenessProbeWithDefaults(probe, container.LivenessProbe) - } - } else { - container.LivenessProbe = nil - } - } -} - -func (c *clusterConfig) generateLiveProbe() *v1.Probe { - return &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: livenessProbePath, - Port: c.generateLiveProbePort(), - Scheme: c.generateLiveProbeScheme(), - }, - }, - InitialDelaySeconds: 10, - } -} - -func (c *clusterConfig) generateLiveProbeScheme() v1.URIScheme { - // Default to HTTP - uriScheme := v1.URISchemeHTTP - - // If rgw is configured to use a secured port we need get on https:// - // Only do this when the Non-SSL port is not used - if c.store.Spec.Gateway.Port == 0 && c.store.Spec.IsTLSEnabled() { - uriScheme = v1.URISchemeHTTPS - } - - return uriScheme -} - -func (c *clusterConfig) generateLiveProbePort() intstr.IntOrString { - // The port the liveness probe needs to probe - // Assume we run on SDN by default - port := intstr.FromInt(int(rgwPortInternalPort)) - - // If Host Networking is enabled, the port from the spec must be reflected - if c.clusterSpec.Network.IsHost() { - port = intstr.FromInt(int(c.store.Spec.Gateway.Port)) - } - - if c.store.Spec.Gateway.Port == 0 && 
c.store.Spec.IsTLSEnabled() { - port = intstr.FromInt(int(c.store.Spec.Gateway.SecurePort)) - } - return port -} - -func (c *clusterConfig) generateService(cephObjectStore *cephv1.CephObjectStore) *v1.Service { - svc := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: instanceName(cephObjectStore.Name), - Namespace: cephObjectStore.Namespace, - Labels: getLabels(cephObjectStore.Name, cephObjectStore.Namespace, true), - }, - } - - if c.store.Spec.Gateway.Service != nil { - c.store.Spec.Gateway.Service.Annotations.ApplyToObjectMeta(&svc.ObjectMeta) - } - if c.clusterSpec.Network.IsHost() { - svc.Spec.ClusterIP = v1.ClusterIPNone - } - - destPort := c.generateLiveProbePort() - - // When the cluster is external we must use the same one as the gateways are listening on - if cephObjectStore.Spec.IsExternal() { - destPort.IntVal = cephObjectStore.Spec.Gateway.Port - } else { - // If the cluster is not external we add the Selector - svc.Spec = v1.ServiceSpec{ - Selector: getLabels(cephObjectStore.Name, cephObjectStore.Namespace, false), - } - } - - addPort(svc, "http", cephObjectStore.Spec.Gateway.Port, destPort.IntVal) - addPort(svc, "https", cephObjectStore.Spec.Gateway.SecurePort, cephObjectStore.Spec.Gateway.SecurePort) - - return svc -} - -func (c *clusterConfig) generateEndpoint(cephObjectStore *cephv1.CephObjectStore) *v1.Endpoints { - labels := getLabels(cephObjectStore.Name, cephObjectStore.Namespace, true) - - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: instanceName(cephObjectStore.Name), - Namespace: cephObjectStore.Namespace, - Labels: labels, - }, - Subsets: []v1.EndpointSubset{ - { - Addresses: cephObjectStore.Spec.Gateway.ExternalRgwEndpoints, - }, - }, - } - - addPortToEndpoint(endpoints, "http", cephObjectStore.Spec.Gateway.Port) - addPortToEndpoint(endpoints, "https", cephObjectStore.Spec.Gateway.SecurePort) - - return endpoints -} - -func (c *clusterConfig) reconcileExternalEndpoint(cephObjectStore *cephv1.CephObjectStore) error { - logger.Info("reconciling external object store service") - - endpoint := c.generateEndpoint(cephObjectStore) - // Set owner ref to the parent object - err := c.ownerInfo.SetControllerReference(endpoint) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to ceph object store endpoint %q", endpoint.Name) - } - - _, err = k8sutil.CreateOrUpdateEndpoint(c.context.Clientset, cephObjectStore.Namespace, endpoint) - if err != nil { - return errors.Wrapf(err, "failed to create or update object store %q endpoint", cephObjectStore.Name) - } - - return nil -} - -func (c *clusterConfig) reconcileService(cephObjectStore *cephv1.CephObjectStore) (string, error) { - service := c.generateService(cephObjectStore) - // Set owner ref to the parent object - err := c.ownerInfo.SetControllerReference(service) - if err != nil { - return "", errors.Wrapf(err, "failed to set owner reference to ceph object store service %q", service.Name) - } - - svc, err := k8sutil.CreateOrUpdateService(c.context.Clientset, cephObjectStore.Namespace, service) - if err != nil { - return "", errors.Wrapf(err, "failed to create or update object store %q service", cephObjectStore.Name) - } - - logger.Infof("ceph object store gateway service running at %s", svc.Spec.ClusterIP) - - return svc.Spec.ClusterIP, nil -} - -func (c *clusterConfig) vaultPrefixRGW() string { - secretEngine := c.store.Spec.Security.KeyManagementService.ConnectionDetails[kms.VaultSecretEngineKey] - vaultPrefixPath := "/v1/" - - switch secretEngine { - case 
kms.VaultKVSecretEngineKey: - vaultPrefixPath = path.Join(vaultPrefixPath, - c.store.Spec.Security.KeyManagementService.ConnectionDetails[vault.VaultBackendPathKey], "/data") - case kms.VaultTransitSecretEngineKey: - if c.clusterInfo.CephVersion.IsAtLeastPacific() { - vaultPrefixPath = path.Join(vaultPrefixPath, secretEngine, "/transit") - } else { - vaultPrefixPath = path.Join(vaultPrefixPath, secretEngine, "/export/encryption-key") - } - } - - return vaultPrefixPath -} - -func (c *clusterConfig) CheckRGWKMS() (bool, error) { - if c.store.Spec.Security != nil && c.store.Spec.Security.KeyManagementService.IsEnabled() { - err := kms.ValidateConnectionDetails(c.context, *c.store.Spec.Security, c.store.Namespace) - if err != nil { - return false, err - } - secretEngine := c.store.Spec.Security.KeyManagementService.ConnectionDetails[kms.VaultSecretEngineKey] - - // currently RGW supports kv(version 2) and transit secret engines in vault - switch secretEngine { - case kms.VaultKVSecretEngineKey: - kvVers := c.store.Spec.Security.KeyManagementService.ConnectionDetails[vault.VaultBackendKey] - if kvVers != "" { - if kvVers != "v2" { - return false, errors.New("failed to validate vault kv version, only v2 is supported") - } - } else { - // If VAUL_BACKEND is not specified let's assume it's v2 - logger.Warningf("%s is not set, assuming the only supported version 2", vault.VaultBackendKey) - c.store.Spec.Security.KeyManagementService.ConnectionDetails[vault.VaultBackendKey] = "v2" - } - return true, nil - case kms.VaultTransitSecretEngineKey: - return true, nil - default: - return false, errors.New("failed to validate vault secret engine") - - } - } - - return false, nil -} - -func addPort(service *v1.Service, name string, port, destPort int32) { - if port == 0 || destPort == 0 { - return - } - service.Spec.Ports = append(service.Spec.Ports, v1.ServicePort{ - Name: name, - Port: port, - TargetPort: intstr.FromInt(int(destPort)), - Protocol: v1.ProtocolTCP, - }) -} - -func addPortToEndpoint(endpoints *v1.Endpoints, name string, port int32) { - if port == 0 { - return - } - endpoints.Subsets[0].Ports = append(endpoints.Subsets[0].Ports, v1.EndpointPort{ - Name: name, - Port: port, - Protocol: v1.ProtocolTCP, - }, - ) -} - -func getLabels(name, namespace string, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, namespace, "rgw", name, includeNewLabels) - labels["rook_object_store"] = name - return labels -} - -func (c *clusterConfig) generateVolumeSourceWithTLSSecret() (*v1.SecretVolumeSource, error) { - // Keep the TLS secret as secure as possible in the container. Give only user read perms. - // Because the Secret mount is owned by "root" and fsGroup breaks on OCP since we cannot predict it - // Also, we don't want to change the SCC for fsGroup to RunAsAny since it has a major broader impact - // Let's open the permissions a bit more so that everyone can read the cert. 
- userReadOnly := int32(0444) - var secretVolSrc *v1.SecretVolumeSource - if c.store.Spec.Gateway.SSLCertificateRef != "" { - secretVolSrc = &v1.SecretVolumeSource{ - SecretName: c.store.Spec.Gateway.SSLCertificateRef, - } - secretType, err := c.rgwTLSSecretType(c.store.Spec.Gateway.SSLCertificateRef) - if err != nil { - return nil, err - } - switch secretType { - case v1.SecretTypeOpaque: - secretVolSrc.Items = []v1.KeyToPath{ - {Key: certKeyName, Path: certFilename, Mode: &userReadOnly}, - } - case v1.SecretTypeTLS: - secretVolSrc.Items = []v1.KeyToPath{ - {Key: v1.TLSCertKey, Path: certFilename, Mode: &userReadOnly}, - {Key: v1.TLSPrivateKeyKey, Path: certKeyFileName, Mode: &userReadOnly}, - } - } - } else if c.store.Spec.GetServiceServingCert() != "" { - secretVolSrc = &v1.SecretVolumeSource{ - SecretName: c.store.Spec.GetServiceServingCert(), - Items: []v1.KeyToPath{ - {Key: v1.TLSCertKey, Path: certFilename, Mode: &userReadOnly}, - {Key: v1.TLSPrivateKeyKey, Path: certKeyFileName, Mode: &userReadOnly}, - }} - } else { - return nil, errors.New("no TLS certificates found") - } - - return secretVolSrc, nil -} - -func (c *clusterConfig) generateVolumeSourceWithCaBundleSecret() (*v1.SecretVolumeSource, error) { - // Keep the ca-bundle as secure as possible in the container. Give only user read perms. - // Same as above for generateVolumeSourceWithTLSSecret function. - userReadOnly := int32(0400) - caBundleVolSrc := &v1.SecretVolumeSource{ - SecretName: c.store.Spec.Gateway.CaBundleRef, - } - secretType, err := c.rgwTLSSecretType(c.store.Spec.Gateway.CaBundleRef) - if err != nil { - return nil, err - } - if secretType != v1.SecretTypeOpaque { - return nil, errors.New("CaBundle secret should be 'Opaque' type") - } - caBundleVolSrc.Items = []v1.KeyToPath{ - {Key: caBundleKeyName, Path: caBundleFileName, Mode: &userReadOnly}, - } - return caBundleVolSrc, nil -} - -func (c *clusterConfig) rgwTLSSecretType(secretName string) (v1.SecretType, error) { - rgwTlsSecret, err := c.context.Clientset.CoreV1().Secrets(c.clusterInfo.Namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) - if rgwTlsSecret != nil { - return rgwTlsSecret.Type, nil - } - return "", errors.Wrapf(err, "no Kubernetes secrets referring TLS certificates found") -} diff --git a/pkg/operator/ceph/object/spec_test.go b/pkg/operator/ceph/object/spec_test.go deleted file mode 100644 index f47b7a2bc..000000000 --- a/pkg/operator/ceph/object/spec_test.go +++ /dev/null @@ -1,382 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
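The two volume-source helpers above share one idea: project only the needed keys out of a Secret and pin their file modes, 0444 for the certificates (so the non-root rgw process can read them regardless of fsGroup) and 0400 for the CA bundle. The sketch below builds such a `corev1.SecretVolumeSource` for the `kubernetes.io/tls` case; the Secret name and target paths are illustrative, while `tls.crt`/`tls.key` are the standard keys the helper selects.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	userReadOnly := int32(0444)

	// Project only tls.crt and tls.key from a kubernetes.io/tls Secret into the
	// cert directory, read-only for everyone, as the rgw pod spec helper does.
	tlsVolSrc := &v1.SecretVolumeSource{
		SecretName: "my-rgw-cert", // illustrative Secret name
		Items: []v1.KeyToPath{
			{Key: v1.TLSCertKey, Path: "rgw-cert.pem", Mode: &userReadOnly},      // path is illustrative
			{Key: v1.TLSPrivateKeyKey, Path: "rgw-key.pem", Mode: &userReadOnly}, // path is illustrative
		},
	}

	fmt.Printf("%d keys projected from %s\n", len(tlsVolSrc.Items), tlsVolSrc.SecretName)
}
```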
-*/ - -package object - -import ( - "context" - "fmt" - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - cephconfig "github.com/rook/rook/pkg/operator/ceph/config" - cephtest "github.com/rook/rook/pkg/operator/ceph/test" - cephver "github.com/rook/rook/pkg/operator/ceph/version" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestPodSpecs(t *testing.T) { - store := simpleStore() - store.Spec.Gateway.Resources = v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(200.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(1337.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(500.0, resource.BinarySI), - }, - } - store.Spec.Gateway.PriorityClassName = "my-priority-class" - info := clienttest.CreateTestClusterInfo(1) - info.CephVersion = cephver.Nautilus - data := cephconfig.NewStatelessDaemonDataPathMap(cephconfig.RgwType, "default", "rook-ceph", "/var/lib/rook/") - - c := &clusterConfig{ - clusterInfo: info, - store: store, - rookVersion: "rook/rook:myversion", - clusterSpec: &cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v15"}, - Network: cephv1.NetworkSpec{ - HostNetwork: true, - }, - }, - DataPathMap: data, - } - - resourceName := fmt.Sprintf("%s-%s", AppName, c.store.Name) - rgwConfig := &rgwConfig{ - ResourceName: resourceName, - } - - s, err := c.makeRGWPodSpec(rgwConfig) - assert.NoError(t, err) - - // Check pod anti affinity is well added to be compliant with HostNetwork setting - assert.Equal(t, - 1, - len(s.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution)) - assert.Equal(t, - getLabels(c.store.Name, c.store.Namespace, false), - s.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].LabelSelector.MatchLabels) - - podTemplate := cephtest.NewPodTemplateSpecTester(t, &s) - podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", - "200", "100", "1337", "500", /* resources */ - "my-priority-class") -} - -func TestSSLPodSpec(t *testing.T) { - ctx := context.TODO() - // Placeholder - context := &clusterd.Context{Clientset: test.New(t, 3)} - - store := simpleStore() - store.Spec.Gateway.Resources = v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(200.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(1337.0, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), - v1.ResourceMemory: *resource.NewQuantity(500.0, resource.BinarySI), - }, - } - store.Spec.Gateway.PriorityClassName = "my-priority-class" - info := clienttest.CreateTestClusterInfo(1) - info.CephVersion = cephver.Nautilus - info.Namespace = store.Namespace - data := cephconfig.NewStatelessDaemonDataPathMap(cephconfig.RgwType, "default", "rook-ceph", "/var/lib/rook/") - store.Spec.Gateway.SecurePort = 443 - - c := &clusterConfig{ - clusterInfo: 
info, - store: store, - context: context, - rookVersion: "rook/rook:myversion", - clusterSpec: &cephv1.ClusterSpec{ - CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v15"}, - Network: cephv1.NetworkSpec{ - HostNetwork: true, - }, - }, - DataPathMap: data, - } - - resourceName := fmt.Sprintf("%s-%s", AppName, c.store.Name) - rgwConfig := &rgwConfig{ - ResourceName: resourceName, - } - _, err := c.makeRGWPodSpec(rgwConfig) - // No TLS certs specified, will return error - assert.Error(t, err) - - // Using SSLCertificateRef - // Opaque Secret - c.store.Spec.Gateway.SSLCertificateRef = "mycert" - rgwtlssecret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.store.Spec.Gateway.SSLCertificateRef, - Namespace: c.store.Namespace, - }, - Data: map[string][]byte{ - "cert": []byte("tlssecrettesting"), - }, - Type: v1.SecretTypeOpaque, - } - _, err = c.context.Clientset.CoreV1().Secrets(store.Namespace).Create(ctx, rgwtlssecret, metav1.CreateOptions{}) - assert.NoError(t, err) - secretVolSrc, err := c.generateVolumeSourceWithTLSSecret() - assert.NoError(t, err) - assert.Equal(t, secretVolSrc.SecretName, "mycert") - s, err := c.makeRGWPodSpec(rgwConfig) - assert.NoError(t, err) - podTemplate := cephtest.NewPodTemplateSpecTester(t, &s) - podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", - "200", "100", "1337", "500", /* resources */ - "my-priority-class") - // TLS Secret - c.store.Spec.Gateway.SSLCertificateRef = "tlscert" - rgwtlssecret = &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.store.Spec.Gateway.SSLCertificateRef, - Namespace: c.store.Namespace, - }, - Data: map[string][]byte{ - "tls.crt": []byte("tlssecrettestingcert"), - "tls.key": []byte("tlssecrettestingkey"), - }, - Type: v1.SecretTypeTLS, - } - _, err = c.context.Clientset.CoreV1().Secrets(store.Namespace).Create(ctx, rgwtlssecret, metav1.CreateOptions{}) - assert.NoError(t, err) - secretVolSrc, err = c.generateVolumeSourceWithTLSSecret() - assert.NoError(t, err) - assert.Equal(t, secretVolSrc.SecretName, "tlscert") - s, err = c.makeRGWPodSpec(rgwConfig) - assert.NoError(t, err) - podTemplate = cephtest.NewPodTemplateSpecTester(t, &s) - podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", - "200", "100", "1337", "500", /* resources */ - "my-priority-class") - // Using service serving cert - c.store.Spec.Gateway.SSLCertificateRef = "" - c.store.Spec.Gateway.Service = &(cephv1.RGWServiceSpec{Annotations: rook.Annotations{cephv1.ServiceServingCertKey: "rgw-cert"}}) - secretVolSrc, err = c.generateVolumeSourceWithTLSSecret() - assert.NoError(t, err) - assert.Equal(t, secretVolSrc.SecretName, "rgw-cert") - // Using caBundleRef - // Opaque Secret - c.store.Spec.Gateway.CaBundleRef = "mycabundle" - cabundlesecret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.store.Spec.Gateway.CaBundleRef, - Namespace: c.store.Namespace, - }, - Data: map[string][]byte{ - "cabundle": []byte("cabundletesting"), - }, - Type: v1.SecretTypeOpaque, - } - _, err = c.context.Clientset.CoreV1().Secrets(store.Namespace).Create(ctx, cabundlesecret, metav1.CreateOptions{}) - assert.NoError(t, err) - caBundleVolSrc, err := c.generateVolumeSourceWithCaBundleSecret() - assert.NoError(t, err) - assert.Equal(t, caBundleVolSrc.SecretName, "mycabundle") - s, err = c.makeRGWPodSpec(rgwConfig) - assert.NoError(t, err) - podTemplate = cephtest.NewPodTemplateSpecTester(t, &s) - 
podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", - "200", "100", "1337", "500", /* resources */ - "my-priority-class") - - assert.True(t, s.Spec.HostNetwork) - assert.Equal(t, v1.DNSClusterFirstWithHostNet, s.Spec.DNSPolicy) - -} - -func TestValidateSpec(t *testing.T) { - executor := &exectest.MockExecutor{} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "crush" && args[2] == "dump" { - return `{"types":[{"type_id": 0,"name": "osd"}, {"type_id": 1,"name": "host"}],"buckets":[{"id": -1,"name":"default"},{"id": -2,"name":"good"}, {"id": -3,"name":"host"}]}`, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - context := &clusterd.Context{Executor: executor} - - r := &ReconcileCephObjectStore{ - context: context, - clusterSpec: &cephv1.ClusterSpec{ - External: cephv1.ExternalSpec{ - Enable: false, - }, - }, - clusterInfo: &client.ClusterInfo{ - CephCred: client.CephCred{ - Username: "client.admin", - }, - }, - } - - // valid store - s := simpleStore() - err := r.validateStore(s) - assert.Nil(t, err) - - // no name - s.Name = "" - err = r.validateStore(s) - assert.NotNil(t, err) - s.Name = "default" - err = r.validateStore(s) - assert.Nil(t, err) - - // no namespace - s.Namespace = "" - err = r.validateStore(s) - assert.NotNil(t, err) - s.Namespace = "mycluster" - err = r.validateStore(s) - assert.Nil(t, err) - - // no replication or EC is valid - s.Spec.MetadataPool.Replicated.Size = 0 - err = r.validateStore(s) - assert.Nil(t, err) - s.Spec.MetadataPool.Replicated.Size = 1 - err = r.validateStore(s) - assert.Nil(t, err) - - // external with endpoints, success - s.Spec.Gateway.ExternalRgwEndpoints = []v1.EndpointAddress{ - { - IP: "192.168.0.1", - }, - } - err = r.validateStore(s) - assert.Nil(t, err) -} - -func TestGenerateLiveProbe(t *testing.T) { - store := simpleStore() - c := &clusterConfig{ - store: store, - clusterSpec: &cephv1.ClusterSpec{ - Network: cephv1.NetworkSpec{ - HostNetwork: false, - }, - }, - } - - // No SSL - HostNetwork is disabled - using internal port - p := c.generateLiveProbe() - assert.Equal(t, int32(8080), p.Handler.HTTPGet.Port.IntVal) - assert.Equal(t, v1.URISchemeHTTP, p.Handler.HTTPGet.Scheme) - - // No SSL - HostNetwork is enabled - c.store.Spec.Gateway.Port = 123 - c.store.Spec.Gateway.SecurePort = 0 - c.clusterSpec.Network.HostNetwork = true - p = c.generateLiveProbe() - assert.Equal(t, int32(123), p.Handler.HTTPGet.Port.IntVal) - - // SSL - HostNetwork is enabled - c.store.Spec.Gateway.Port = 0 - c.store.Spec.Gateway.SecurePort = 321 - c.store.Spec.Gateway.SSLCertificateRef = "foo" - p = c.generateLiveProbe() - assert.Equal(t, int32(321), p.Handler.HTTPGet.Port.IntVal) - - // Both Non-SSL and SSL are enabled - // liveprobe just on Non-SSL - c.store.Spec.Gateway.Port = 123 - c.store.Spec.Gateway.SecurePort = 321 - c.store.Spec.Gateway.SSLCertificateRef = "foo" - p = c.generateLiveProbe() - assert.Equal(t, v1.URISchemeHTTP, p.Handler.HTTPGet.Scheme) - assert.Equal(t, int32(123), p.Handler.HTTPGet.Port.IntVal) -} - -func TestCheckRGWKMS(t *testing.T) { - ctx := context.TODO() - // Placeholder - context := &clusterd.Context{Clientset: test.New(t, 3)} - store := simpleStore() - store.Spec.Security = &cephv1.SecuritySpec{KeyManagementService: cephv1.KeyManagementServiceSpec{ConnectionDetails: map[string]string{}}} - c := &clusterConfig{ - context: context, - 
store: store, - } - - // without KMS - b, err := c.CheckRGWKMS() - assert.False(t, b) - assert.NoError(t, err) - - // setting KMS configurations - c.store.Spec.Security.KeyManagementService.TokenSecretName = "vault-token" - c.store.Spec.Security.KeyManagementService.ConnectionDetails["KMS_PROVIDER"] = "vault" - c.store.Spec.Security.KeyManagementService.ConnectionDetails["VAULT_ADDR"] = "https://1.1.1.1:8200" - s := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: store.Spec.Security.KeyManagementService.TokenSecretName, - Namespace: store.Namespace, - }, - Data: map[string][]byte{ - "token": []byte("myt-otkenbenvqrev"), - }, - } - _, err = context.Clientset.CoreV1().Secrets(store.Namespace).Create(ctx, s, metav1.CreateOptions{}) - assert.NoError(t, err) - - // no secret engine set, will fail - b, err = c.CheckRGWKMS() - assert.False(t, b) - assert.Error(t, err) - - // kv engine version v1, will fail - c.store.Spec.Security.KeyManagementService.ConnectionDetails["VAULT_SECRET_ENGINE"] = "kv" - c.store.Spec.Security.KeyManagementService.ConnectionDetails["VAULT_BACKEND"] = "v1" - b, err = c.CheckRGWKMS() - assert.False(t, b) - assert.Error(t, err) - - // kv engine version v2, will pass - c.store.Spec.Security.KeyManagementService.ConnectionDetails["VAULT_BACKEND"] = "v2" - b, err = c.CheckRGWKMS() - assert.True(t, b) - assert.NoError(t, err) - - // transit engine, will pass - c.store.Spec.Security.KeyManagementService.ConnectionDetails["VAULT_SECRET_ENGINE"] = "transit" - c.store.Spec.Security.KeyManagementService.ConnectionDetails["VAULT_BACKEND"] = "" - b, err = c.CheckRGWKMS() - assert.True(t, b) - assert.NoError(t, err) -} diff --git a/pkg/operator/ceph/object/status.go b/pkg/operator/ceph/object/status.go deleted file mode 100644 index a369a981b..000000000 --- a/pkg/operator/ceph/object/status.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/reporting" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/retry" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ReconcileCephObjectStore) setFailedStatus(name types.NamespacedName, errMessage string, err error) (reconcile.Result, error) { - updateStatus(r.client, name, cephv1.ConditionFailure, map[string]string{}) - return reconcile.Result{}, errors.Wrapf(err, "%s", errMessage) -} - -// updateStatus updates an object with a given status -func updateStatus(client client.Client, namespacedName types.NamespacedName, status cephv1.ConditionType, info map[string]string) { - // Updating the status is important to users, but we can still keep operating if there is a - // failure. Retry a few times to give it our best effort attempt. 
- err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - objectStore := &cephv1.CephObjectStore{} - if err := client.Get(context.TODO(), namespacedName, objectStore); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectStore resource not found. Ignoring since object must be deleted.") - return nil - } - return errors.Wrapf(err, "failed to retrieve object store %q to update status to %q", namespacedName.String(), status) - } - if objectStore.Status == nil { - objectStore.Status = &cephv1.ObjectStoreStatus{} - } - - if objectStore.Status.Phase == cephv1.ConditionDeleting { - logger.Debugf("object store %q status not updated to %q because it is deleting", namespacedName.String(), status) - return nil // do not transition to other statuses once deletion begins - } - - objectStore.Status.Phase = status - objectStore.Status.Info = info - - if err := reporting.UpdateStatus(client, objectStore); err != nil { - return errors.Wrapf(err, "failed to set object store %q status to %q", namespacedName.String(), status) - } - return nil - }) - if err != nil { - logger.Error(err) - } - - logger.Debugf("object store %q status updated to %q", namespacedName.String(), status) -} - -// updateStatusBucket updates an object with a given status -func updateStatusBucket(client client.Client, name types.NamespacedName, status cephv1.ConditionType, details string) { - // Updating the status is important to users, but we can still keep operating if there is a - // failure. Retry a few times to give it our best effort attempt. - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - objectStore := &cephv1.CephObjectStore{} - if err := client.Get(context.TODO(), name, objectStore); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectStore resource not found. 
Ignoring since object must be deleted.") - return nil - } - return errors.Wrapf(err, "failed to retrieve object store %q to update status to %v", name.String(), status) - } - if objectStore.Status == nil { - objectStore.Status = &cephv1.ObjectStoreStatus{} - } - objectStore.Status.BucketStatus = toCustomResourceStatus(objectStore.Status.BucketStatus, details, status) - - if objectStore.Status.Phase != cephv1.ConditionDeleting { - // do not transition to other statuses once deletion begins - logger.Debugf("object store %q status not updated to %q because it is deleting", name.String(), status) - objectStore.Status.Phase = status - } - - if err := reporting.UpdateStatus(client, objectStore); err != nil { - return errors.Wrapf(err, "failed to set object store %q status to %v", name.String(), status) - } - return nil - }) - if err != nil { - logger.Error(err) - } - - logger.Debugf("object store %q status updated to %v", name.String(), status) -} - -func buildStatusInfo(cephObjectStore *cephv1.CephObjectStore) map[string]string { - m := make(map[string]string) - - if cephObjectStore.Spec.Gateway.SecurePort != 0 && cephObjectStore.Spec.Gateway.Port != 0 { - m["secureEndpoint"] = BuildDNSEndpoint(BuildDomainName(cephObjectStore.Name, cephObjectStore.Namespace), cephObjectStore.Spec.Gateway.SecurePort, true) - m["endpoint"] = BuildDNSEndpoint(BuildDomainName(cephObjectStore.Name, cephObjectStore.Namespace), cephObjectStore.Spec.Gateway.Port, false) - } else if cephObjectStore.Spec.Gateway.SecurePort != 0 { - m["endpoint"] = BuildDNSEndpoint(BuildDomainName(cephObjectStore.Name, cephObjectStore.Namespace), cephObjectStore.Spec.Gateway.SecurePort, true) - } else { - m["endpoint"] = BuildDNSEndpoint(BuildDomainName(cephObjectStore.Name, cephObjectStore.Namespace), cephObjectStore.Spec.Gateway.Port, false) - } - - return m -} diff --git a/pkg/operator/ceph/object/status_test.go b/pkg/operator/ceph/object/status_test.go deleted file mode 100644 index be8b743c9..000000000 --- a/pkg/operator/ceph/object/status_test.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package object - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestBuildStatusInfo(t *testing.T) { - // Port enabled and SecurePort disabled - cephObjectStore := &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-store", - Namespace: "rook-ceph", - }, - } - cephObjectStore.Spec.Gateway.Port = 80 - - statusInfo := buildStatusInfo(cephObjectStore) - assert.NotEmpty(t, statusInfo["endpoint"]) - assert.Empty(t, statusInfo["secureEndpoint"]) - assert.Equal(t, "http://rook-ceph-rgw-my-store.rook-ceph.svc:80", statusInfo["endpoint"]) - - // SecurePort enabled and Port disabled - cephObjectStore.Spec.Gateway.Port = 0 - cephObjectStore.Spec.Gateway.SecurePort = 443 - - statusInfo = buildStatusInfo(cephObjectStore) - assert.NotEmpty(t, statusInfo["endpoint"]) - assert.Empty(t, statusInfo["secureEndpoint"]) - assert.Equal(t, "https://rook-ceph-rgw-my-store.rook-ceph.svc:443", statusInfo["endpoint"]) - - // Both Port and SecurePort enabled - cephObjectStore.Spec.Gateway.Port = 80 - cephObjectStore.Spec.Gateway.SecurePort = 443 - - statusInfo = buildStatusInfo(cephObjectStore) - assert.NotEmpty(t, statusInfo["endpoint"]) - assert.NotEmpty(t, statusInfo["secureEndpoint"]) - assert.Equal(t, "http://rook-ceph-rgw-my-store.rook-ceph.svc:80", statusInfo["endpoint"]) - assert.Equal(t, "https://rook-ceph-rgw-my-store.rook-ceph.svc:443", statusInfo["secureEndpoint"]) -} diff --git a/pkg/operator/ceph/object/user.go b/pkg/operator/ceph/object/user.go deleted file mode 100644 index a5a7ad4e2..000000000 --- a/pkg/operator/ceph/object/user.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "encoding/json" - "strings" - "syscall" - - "github.com/ceph/go-ceph/rgw/admin" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/util/exec" -) - -const ( - RGWErrorNone = iota - RGWErrorUnknown - RGWErrorNotFound - RGWErrorBadData - RGWErrorParse - ErrorCodeFileExists = 17 -) - -// An ObjectUser defines the details of an object store user. -type ObjectUser struct { - UserID string `json:"userId"` - DisplayName *string `json:"displayName"` - Email *string `json:"email"` - AccessKey *string `json:"accessKey"` - SecretKey *string `json:"secretKey"` - SystemUser bool `json:"systemuser"` - AdminOpsUser bool `json:"adminopsuser"` -} - -// func decodeUser(data string) (*ObjectUser, int, error) { -func decodeUser(data string) (*ObjectUser, int, error) { - var user admin.User - err := json.Unmarshal([]byte(data), &user) - if err != nil { - return nil, RGWErrorParse, errors.Wrapf(err, "failed to unmarshal json. 
%s", data) - } - - rookUser := ObjectUser{UserID: user.ID, DisplayName: &user.DisplayName, Email: &user.Email} - - if len(user.Keys) > 0 { - rookUser.AccessKey = &user.Keys[0].AccessKey - rookUser.SecretKey = &user.Keys[0].SecretKey - } else { - return nil, RGWErrorBadData, errors.New("AccessKey and SecretKey are missing") - } - - return &rookUser, RGWErrorNone, nil -} - -// GetUser returns the user with the given ID. -// The function is used **ONCE** only to provision so the RGW Admin Ops User -// Subsequent interaction with the API will be done with the created user -func GetUser(c *Context, id string) (*ObjectUser, int, error) { - logger.Debugf("getting s3 user %q", id) - // note: err is set for non-existent user but result output is also empty - result, err := runAdminCommand(c, false, "user", "info", "--uid", id) - if strings.Contains(result, "no user info saved") { - return nil, RGWErrorNotFound, errors.New("warn: s3 user not found") - } - if err != nil { - return nil, RGWErrorUnknown, errors.Wrapf(err, "radosgw-admin command err. %s", result) - } - match, err := extractJSON(result) - if err != nil { - return nil, RGWErrorParse, errors.Wrap(err, "failed to get json") - } - return decodeUser(match) -} - -// CreateUser creates a new user with the information given. -// The function is used **ONCE** only to provision so the RGW Admin Ops User -// Subsequent interaction with the API will be done with the created user -func CreateUser(c *Context, user ObjectUser) (*ObjectUser, int, error) { - logger.Debugf("creating s3 user %q", user.UserID) - - if strings.TrimSpace(user.UserID) == "" { - return nil, RGWErrorBadData, errors.New("userId cannot be empty") - } - - if user.DisplayName == nil { - return nil, RGWErrorBadData, errors.New("displayName is required") - } - - args := []string{ - "user", - "create", - "--uid", user.UserID, - "--display-name", *user.DisplayName, - } - - if user.Email != nil { - args = append(args, "--email", *user.Email) - } - - if user.SystemUser { - args = append(args, "--system") - } - - if user.AdminOpsUser { - args = append(args, "--caps", rgwAdminOpsUserCaps) - } - - result, err := runAdminCommand(c, true, args...) - if err != nil { - if strings.Contains(result, "could not create user: unable to create user, user: ") { - return nil, ErrorCodeFileExists, errors.New("s3 user already exists") - } - - if strings.Contains(result, "could not create user: unable to create user, email: ") && strings.Contains(result, " is the email address an existing user") { - return nil, RGWErrorBadData, errors.New("email already in use") - } - - if strings.Contains(result, "global_init: unable to open config file from search list") { - return nil, RGWErrorUnknown, errors.New("skipping reconcile since operator is still initializing") - } - - // We don't know what happened - return nil, RGWErrorUnknown, errors.Wrapf(err, "failed to create s3 user. %s", result) - } - return decodeUser(result) -} - -func ListUserBuckets(c *Context, id string, opts ...string) (string, error) { - - args := []string{"bucket", "list", "--uid", id} - if opts != nil { - args = append(args, opts...) - } - - result, err := runAdminCommand(c, false, args...) - - return result, errors.Wrapf(err, "failed to list buckets for user uid=%q", id) -} - -// DeleteUser deletes the user with the given ID. 
-// Even though we should be using the Admin Ops API, we keep this on purpose until the entire migration is completed -// Used for the dashboard user -func DeleteUser(c *Context, id string, opts ...string) (string, error) { - args := []string{"user", "rm", "--uid", id} - if opts != nil { - args = append(args, opts...) - } - result, err := runAdminCommand(c, false, args...) - if err != nil { - // If User does not exist return success - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - return result, nil - } - - res, innerErr := ListUserBuckets(c, id) - if innerErr == nil && res != "" && res != "[]" { - return result, errors.Wrapf(err, "s3 user uid=%q have following buckets %q", id, res) - } - } - - return result, errors.Wrapf(err, "failed to delete s3 user uid=%q", id) -} diff --git a/pkg/operator/ceph/object/user/controller.go b/pkg/operator/ceph/object/user/controller.go deleted file mode 100644 index 66f613242..000000000 --- a/pkg/operator/ceph/object/user/controller.go +++ /dev/null @@ -1,529 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package objectuser to manage a rook object store user. -package objectuser - -import ( - "context" - "fmt" - "reflect" - - "github.com/ceph/go-ceph/rgw/admin" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/pkg/operator/k8sutil" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - appName = object.AppName - controllerName = "ceph-object-store-user-controller" -) - -// newMultisiteAdminOpsCtxFunc help us mocking the admin ops API client in unit test -var newMultisiteAdminOpsCtxFunc = object.NewMultisiteAdminOpsContext - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -var cephObjectStoreUserKind = reflect.TypeOf(cephv1.CephObjectStoreUser{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephObjectStoreUserKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileObjectStoreUser reconciles a ObjectStoreUser object -type 
ReconcileObjectStoreUser struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - objContext *object.AdminOpsContext - userConfig *admin.User - cephClusterSpec *cephv1.ClusterSpec - clusterInfo *cephclient.ClusterInfo -} - -// Add creates a new CephObjectStoreUser Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. -func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - - return &ReconcileObjectStoreUser{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephObjectStoreUser CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephObjectStoreUser{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - // Watch secrets - err = c.Watch(&source.Kind{Type: &corev1.Secret{TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: corev1.SchemeGroupVersion.String()}}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cephv1.CephObjectStoreUser{}, - }, opcontroller.WatchPredicateForNonCRDObject(&cephv1.CephObjectStoreUser{TypeMeta: controllerTypeMeta}, mgr.GetScheme())) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephObjectStoreUser object and makes changes based on the state read -// and what is in the CephObjectStoreUser.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileObjectStoreUser) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - logger.Errorf("failed to reconcile %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileObjectStoreUser) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the CephObjectStoreUser instance - cephObjectStoreUser := &cephv1.CephObjectStoreUser{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephObjectStoreUser) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectStoreUser resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. 
- return reconcile.Result{}, errors.Wrap(err, "failed to get CephObjectStoreUser") - } - - // Set a finalizer so we can do cleanup before the object goes away - err = opcontroller.AddFinalizerIfNotPresent(r.client, cephObjectStoreUser) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer") - } - - // The CR was just created, initializing status fields - if cephObjectStoreUser.Status == nil { - updateStatus(r.client, request.NamespacedName, k8sutil.EmptyStatus) - } - - // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - // This handles the case where the Ceph Cluster is gone and we want to delete that CR - // We skip the deleteUser() function since everything is gone already - // - // Also, only remove the finalizer if the CephCluster is gone - // If not, we should wait for it to be ready - // This handles the case where the operator is not ready to accept Ceph command but the cluster exists - if !cephObjectStoreUser.GetDeletionTimestamp().IsZero() && !cephClusterExists { - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephObjectStoreUser) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, nil - } - return reconcileResponse, nil - } - r.cephClusterSpec = &cephCluster.Spec - - // Populate clusterInfo during each reconcile - r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info") - } - - // Validate the object store has been initialized - err = r.initializeObjectStoreContext(cephObjectStoreUser) - if err != nil { - if !cephObjectStoreUser.GetDeletionTimestamp().IsZero() { - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephObjectStoreUser) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, nil - } - logger.Debugf("ObjectStore resource not ready in namespace %q, retrying in %q. %v", - request.NamespacedName.Namespace, opcontroller.WaitForRequeueIfCephClusterNotReady.RequeueAfter.String(), err) - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcileFailedStatus) - return opcontroller.WaitForRequeueIfCephClusterNotReady, nil - } - - // Generate user config - userConfig := generateUserConfig(cephObjectStoreUser) - r.userConfig = &userConfig - - // DELETE: the CR was deleted - if !cephObjectStoreUser.GetDeletionTimestamp().IsZero() { - logger.Debugf("deleting pool %q", cephObjectStoreUser.Name) - err := r.deleteUser(cephObjectStoreUser) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to delete ceph object user %q", cephObjectStoreUser.Name) - } - - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephObjectStoreUser) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. 
- return reconcile.Result{}, nil - } - - // validate the user settings - err = r.validateUser(cephObjectStoreUser) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcileFailedStatus) - return reconcile.Result{}, errors.Wrapf(err, "invalid object store user CR %q spec", cephObjectStoreUser.Name) - } - - // CREATE/UPDATE CEPH USER - reconcileResponse, err = r.reconcileCephUser(cephObjectStoreUser) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcileFailedStatus) - return reconcileResponse, err - } - - // CREATE/UPDATE KUBERNETES SECRET - reconcileResponse, err = r.reconcileCephUserSecret(cephObjectStoreUser) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcileFailedStatus) - return reconcileResponse, err - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, k8sutil.ReadyStatus) - - // Return and do not requeue - logger.Debug("done reconciling") - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectStoreUser) reconcileCephUser(cephObjectStoreUser *cephv1.CephObjectStoreUser) (reconcile.Result, error) { - err := r.createorUpdateCephUser(cephObjectStoreUser) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to create/update object store user %q", cephObjectStoreUser.Name) - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectStoreUser) createorUpdateCephUser(u *cephv1.CephObjectStoreUser) error { - logger.Infof("creating ceph object user %q in namespace %q", u.Name, u.Namespace) - - logCreateOrUpdate := fmt.Sprintf("retrieved existing ceph object user %q", u.Name) - var user admin.User - var err error - user, err = r.objContext.AdminOpsClient.GetUser(context.TODO(), *r.userConfig) - if err != nil { - if errors.Is(err, admin.ErrNoSuchUser) { - user, err = r.objContext.AdminOpsClient.CreateUser(context.TODO(), *r.userConfig) - if err != nil { - return errors.Wrapf(err, "failed to create ceph object user %v", &r.userConfig.ID) - } - logCreateOrUpdate = fmt.Sprintf("created ceph object user %q", u.Name) - } else { - return errors.Wrapf(err, "failed to get details from ceph object user %q", u.Name) - } - } - - // Set access and secret key - r.userConfig.Keys[0].AccessKey = user.Keys[0].AccessKey - r.userConfig.Keys[0].SecretKey = user.Keys[0].SecretKey - - logger.Info(logCreateOrUpdate) - return nil -} - -func (r *ReconcileObjectStoreUser) initializeObjectStoreContext(u *cephv1.CephObjectStoreUser) error { - err := r.objectStoreInitialized(u) - if err != nil { - return errors.Wrapf(err, "failed to detect if object store %q is initialized", u.Spec.Store) - } - - store, err := r.getObjectStore(u.Spec.Store) - if err != nil { - return errors.Wrapf(err, "failed to get object store %q", u.Spec.Store) - } - - objContext, err := object.NewMultisiteContext(r.context, r.clusterInfo, store) - if err != nil { - return errors.Wrapf(err, "failed to set multisite on object context for object store user") - } - - // The object store context needs the CephCluster spec to read network info - // Otherwise GetAdminOPSUserCredentials() will fail to detect the network provider when running RunAdminCommandNoMultisite() - objContext.CephClusterSpec = *r.cephClusterSpec - - opsContext, err := newMultisiteAdminOpsCtxFunc(objContext, &store.Spec) - if err != nil { - return errors.Wrap(err, "failed to initialize rgw admin ops client api") - } - r.objContext = opsContext - - return nil -} - -func generateUserConfig(user *cephv1.CephObjectStoreUser) 
admin.User { - // Set DisplayName to match Name if DisplayName is not set - displayName := user.Spec.DisplayName - if len(displayName) == 0 { - displayName = user.Name - } - - // create the user - userConfig := admin.User{ - ID: user.Name, - DisplayName: displayName, - Keys: make([]admin.UserKeySpec, 1), - } - - return userConfig -} - -func generateCephUserSecretName(u *cephv1.CephObjectStoreUser) string { - return fmt.Sprintf("rook-ceph-object-user-%s-%s", u.Spec.Store, u.Name) -} - -func generateStatusInfo(u *cephv1.CephObjectStoreUser) map[string]string { - m := make(map[string]string) - m["secretName"] = generateCephUserSecretName(u) - return m -} - -func (r *ReconcileObjectStoreUser) generateCephUserSecret(u *cephv1.CephObjectStoreUser) *corev1.Secret { - // Store the keys in a secret - secrets := map[string]string{ - "AccessKey": r.userConfig.Keys[0].AccessKey, - "SecretKey": r.userConfig.Keys[0].SecretKey, - "Endpoint": r.objContext.Endpoint, - } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: generateCephUserSecretName(u), - Namespace: u.Namespace, - Labels: map[string]string{ - "app": appName, - "user": u.Name, - "rook_cluster": u.Namespace, - "rook_object_store": u.Spec.Store, - }, - }, - StringData: secrets, - Type: k8sutil.RookType, - } - return secret -} - -func (r *ReconcileObjectStoreUser) reconcileCephUserSecret(cephObjectStoreUser *cephv1.CephObjectStoreUser) (reconcile.Result, error) { - // Generate Kubernetes Secret - secret := r.generateCephUserSecret(cephObjectStoreUser) - - // Set owner ref to the object store user object - err := controllerutil.SetControllerReference(cephObjectStoreUser, secret, r.scheme) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to set owner reference of ceph object user secret %q", secret.Name) - } - - // Create Kubernetes Secret - err = opcontroller.CreateOrUpdateObject(r.client, secret) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to create or update ceph object user %q secret", secret.Name) - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectStoreUser) objectStoreInitialized(cephObjectStoreUser *cephv1.CephObjectStoreUser) error { - _, err := r.getObjectStore(cephObjectStoreUser.Spec.Store) - if err != nil { - return err - } - logger.Debug("CephObjectStore exists") - - // If the cluster is external just return - // since there are no pods running - if r.cephClusterSpec.External.Enable { - return nil - } - - // There are no pods running when the cluster is external - // Unless you pass the admin key... 
- pods, err := r.getRgwPodList(cephObjectStoreUser) - if err != nil { - return err - } - - // check if at least one pod is running - if len(pods.Items) > 0 { - logger.Debugf("CephObjectStore %q is running with %d pods", cephObjectStoreUser.Name, len(pods.Items)) - return nil - } - - return errors.New("no rgw pod found") -} - -func (r *ReconcileObjectStoreUser) getObjectStore(storeName string) (*cephv1.CephObjectStore, error) { - // check if CephObjectStore CR is created - objectStores := &cephv1.CephObjectStoreList{} - err := r.client.List(context.TODO(), objectStores) - if err != nil { - if kerrors.IsNotFound(err) { - return nil, errors.Wrapf(err, "CephObjectStore %q could not be found", storeName) - } - return nil, errors.Wrap(err, "failed to get CephObjectStore") - } - - for _, store := range objectStores.Items { - if store.Name == storeName { - logger.Infof("CephObjectStore %q found", storeName) - return &store, nil - } - } - - return nil, errors.Errorf("CephObjectStore %q could not be found", storeName) -} - -func (r *ReconcileObjectStoreUser) getRgwPodList(cephObjectStoreUser *cephv1.CephObjectStoreUser) (*corev1.PodList, error) { - pods := &corev1.PodList{} - - // check if ObjectStore is initialized - // rook does this by starting the RGW pod(s) - listOpts := []client.ListOption{ - client.InNamespace(cephObjectStoreUser.Namespace), - client.MatchingLabels(labelsForRgw(cephObjectStoreUser.Spec.Store)), - } - - err := r.client.List(context.TODO(), pods, listOpts...) - if err != nil { - if kerrors.IsNotFound(err) { - return pods, errors.Wrap(err, "no rgw pod could be found") - } - return pods, errors.Wrap(err, "failed to list rgw pods") - } - - return pods, nil -} - -// Delete the user -func (r *ReconcileObjectStoreUser) deleteUser(u *cephv1.CephObjectStoreUser) error { - err := r.objContext.AdminOpsClient.RemoveUser(context.TODO(), admin.User{ID: u.Name}) - if err != nil { - if errors.Is(err, admin.ErrNoSuchUser) { - logger.Warningf("user %q does not exist, nothing to remove", u.Name) - return nil - } - return errors.Wrapf(err, "failed to delete ceph object user %q.", u.Name) - } - - logger.Infof("ceph object user %q deleted successfully", u.Name) - return nil -} - -// validateUser validates the user arguments -func (r *ReconcileObjectStoreUser) validateUser(u *cephv1.CephObjectStoreUser) error { - if u.Name == "" { - return errors.New("missing name") - } - if u.Namespace == "" { - return errors.New("missing namespace") - } - if !r.cephClusterSpec.External.Enable { - if u.Spec.Store == "" { - return errors.New("missing store") - } - } - return nil -} - -func labelsForRgw(name string) map[string]string { - return map[string]string{"rgw": name, k8sutil.AppAttr: appName} -} - -// updateStatus updates an object with a given status -func updateStatus(client client.Client, name types.NamespacedName, status string) { - user := &cephv1.CephObjectStoreUser{} - if err := client.Get(context.TODO(), name, user); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectStoreUser resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve object store user %q to update status to %q. 
%v", name, status, err) - return - } - if user.Status == nil { - user.Status = &cephv1.ObjectStoreUserStatus{} - } - - user.Status.Phase = status - if user.Status.Phase == k8sutil.ReadyStatus { - user.Status.Info = generateStatusInfo(user) - } - if err := reporting.UpdateStatus(client, user); err != nil { - logger.Errorf("failed to set object store user %q status to %q. %v", name, status, err) - return - } - logger.Debugf("object store user %q status updated to %q", name, status) -} diff --git a/pkg/operator/ceph/object/user/controller_test.go b/pkg/operator/ceph/object/user/controller_test.go deleted file mode 100644 index b2fec0a04..000000000 --- a/pkg/operator/ceph/object/user/controller_test.go +++ /dev/null @@ -1,355 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package objectuser to manage a rook object store. -package objectuser - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "net/http" - "testing" - "time" - - "github.com/ceph/go-ceph/rgw/admin" - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - - "github.com/rook/rook/pkg/clusterd" - cephobject "github.com/rook/rook/pkg/operator/ceph/object" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - userCreateJSON = `{ - "user_id": "my-user", - "display_name": "my-user", - "email": "", - "suspended": 0, - "max_buckets": 1000, - "subusers": [], - "keys": [ - { - "user": "my-user", - "access_key": "EOE7FYCNOBZJ5VFV909G", - "secret_key": "qmIqpWm8HxCzmynCrD6U6vKWi4hnDBndOnmxXNsV" - } - ], - "swift_keys": [], - "caps": [], - "op_mask": "read, write, delete", - "default_placement": "", - "default_storage_class": "", - "placement_tags": [], - "bucket_quota": { - "enabled": false, - "check_on_raw": false, - "max_size": -1, - "max_size_kb": 0, - "max_objects": -1 - }, - "user_quota": { - "enabled": false, - "check_on_raw": false, - "max_size": -1, - "max_size_kb": 0, - "max_objects": -1 - }, - "temp_url_keys": [], - "type": "rgw", - "mfa_ids": [] -}` -) - -var ( - name = "my-user" - namespace = "rook-ceph" - store = "my-store" -) - -func TestCephObjectStoreUserController(t *testing.T) { - ctx := context.TODO() - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - // A Pool resource with metadata and spec. 
- objectUser := &cephv1.CephObjectStoreUser{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.ObjectStoreUserSpec{ - Store: store, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectStoreUser", - }, - } - cephCluster := &cephv1.CephCluster{} - - // Objects to track in the fake client. - object := []runtime.Object{ - objectUser, - cephCluster, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - } - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectStoreUser{}, &cephv1.CephCluster{}, &cephv1.CephClusterList{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileObjectStoreUser object with the scheme and fake client. - r := &ReconcileObjectStoreUser{client: cl, scheme: s, context: c} - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - logger.Info("STARTING PHASE 1") - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 1 DONE") - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - cephCluster = &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileObjectStoreUser object with the scheme and fake client. - r = &ReconcileObjectStoreUser{client: cl, scheme: s, context: c} - logger.Info("STARTING PHASE 2") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 2 DONE") - - // - // TEST 3: - // - // FAILURE! The CephCluster is ready but NO rgw object - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. 
- cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "user" { - return userCreateJSON, nil - } - return "", nil - }, - } - c.Executor = executor - - // Create a ReconcileObjectStoreUser object with the scheme and fake client. - r = &ReconcileObjectStoreUser{client: cl, scheme: s, context: c} - - logger.Info("STARTING PHASE 3") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 3 DONE") - - // - // TEST 4: - // - // FAILURE! The CephCluster is ready - // Rgw object exists but NO pod are running - // - cephObjectStore := &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: store, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectStore", - }, - Spec: cephv1.ObjectStoreSpec{ - Gateway: cephv1.GatewaySpec{ - Port: 80, - }, - }, - Status: &cephv1.ObjectStoreStatus{ - Info: map[string]string{"endpoint": "http://rook-ceph-rgw-my-store.rook-ceph:80"}, - }, - } - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectStore{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectStoreList{}) - object = append(object, cephObjectStore) - - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileObjectStoreUser object with the scheme and fake client. - r = &ReconcileObjectStoreUser{client: cl, scheme: s, context: c} - - logger.Info("STARTING PHASE 4") - err = r.client.Get(context.TODO(), types.NamespacedName{Name: store, Namespace: namespace}, cephObjectStore) - assert.NoError(t, err, cephObjectStore) - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 4 DONE") - - // - // TEST 5: - // - // SUCCESS! The CephCluster is ready - // Rgw object exists and pods are running - // - rgwPod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-rgw-my-store-a-5fd6fb4489-xv65v", - Namespace: namespace, - Labels: map[string]string{k8sutil.AppAttr: appName, "rgw": "my-store"}}} - - // Get the updated object. - logger.Info("STARTING PHASE 5") - // Create RGW pod - err = r.client.Create(context.TODO(), rgwPod) - assert.NoError(t, err) - - // Mock client - newMultisiteAdminOpsCtxFunc = func(objContext *cephobject.Context, spec *cephv1.ObjectStoreSpec) (*cephobject.AdminOpsContext, error) { - mockClient := &cephobject.MockClient{ - MockDo: func(req *http.Request) (*http.Response, error) { - if req.URL.RawQuery == "display-name=my-user&format=json&uid=my-user" && req.Method == http.MethodGet && req.URL.Path == "rook-ceph-rgw-my-store.mycluster.svc/admin/user" { - return &http.Response{ - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewReader([]byte(userCreateJSON))), - }, nil - } - return nil, fmt.Errorf("unexpected request: %q. method %q. 
path %q", req.URL.RawQuery, req.Method, req.URL.Path) - }, - } - - context, err := cephobject.NewMultisiteContext(r.context, r.clusterInfo, cephObjectStore) - assert.NoError(t, err) - adminClient, err := admin.New("rook-ceph-rgw-my-store.mycluster.svc", "53S6B9S809NUP19IJ2K3", "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR", mockClient) - assert.NoError(t, err) - - return &cephobject.AdminOpsContext{ - Context: *context, - AdminOpsUserAccessKey: "53S6B9S809NUP19IJ2K3", - AdminOpsUserSecretKey: "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR", - AdminOpsClient: adminClient, - }, nil - } - - // Run reconcile - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, objectUser) - assert.NoError(t, err) - assert.Equal(t, "Ready", objectUser.Status.Phase, objectUser) - logger.Info("PHASE 5 DONE") -} - -func TestBuildUpdateStatusInfo(t *testing.T) { - cephObjectStoreUser := &cephv1.CephObjectStoreUser{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: cephv1.ObjectStoreUserSpec{ - Store: store, - }, - } - - statusInfo := generateStatusInfo(cephObjectStoreUser) - assert.NotEmpty(t, statusInfo["secretName"]) - assert.Equal(t, "rook-ceph-object-user-my-store-my-user", statusInfo["secretName"]) -} diff --git a/pkg/operator/ceph/object/zone/controller.go b/pkg/operator/ceph/object/zone/controller.go deleted file mode 100644 index 975a19262..000000000 --- a/pkg/operator/ceph/object/zone/controller.go +++ /dev/null @@ -1,371 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package zone to manage a rook object zone. 
-package zone - -import ( - "context" - "fmt" - "reflect" - "syscall" - "time" - - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/pkg/operator/ceph/pool" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - controllerName = "ceph-object-zone-controller" -) - -var waitForRequeueIfObjectZoneGroupNotReady = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -var cephObjectZoneKind = reflect.TypeOf(cephv1.CephObjectZone{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephObjectZoneKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileObjectZone reconciles a ObjectZone object -type ReconcileObjectZone struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - clusterSpec *cephv1.ClusterSpec -} - -// Add creates a new CephObjectZone Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. -func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - return &ReconcileObjectZone{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephObjectZone CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephObjectZone{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephObjectZone object and makes changes based on the state read -// and what is in the CephObjectZone.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
-func (r *ReconcileObjectZone) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - logger.Errorf("failed to reconcile: %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileObjectZone) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the CephObjectZone instance - cephObjectZone := &cephv1.CephObjectZone{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephObjectZone) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectZone resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, errors.Wrap(err, "failed to get CephObjectZone") - } - - // The CR was just created, initializing status fields - if cephObjectZone.Status == nil { - updateStatus(r.client, request.NamespacedName, k8sutil.EmptyStatus) - } - - // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - // This handles the case where the Ceph Cluster is gone and we want to delete that CR - // - if !cephObjectZone.GetDeletionTimestamp().IsZero() && !cephClusterExists { - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, nil - } - return reconcileResponse, nil - } - r.clusterSpec = &cephCluster.Spec - - // DELETE: the CR was deleted - if !cephObjectZone.GetDeletionTimestamp().IsZero() { - logger.Debugf("deleting zone CR %q", cephObjectZone.Name) - - // Return and do not requeue. Successful deletion. 
- return reconcile.Result{}, nil - } - - // Populate clusterInfo during each reconcile - r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info") - } - - // validate the zone settings - err = r.validateZoneCR(cephObjectZone) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcileFailedStatus) - return reconcile.Result{}, errors.Wrapf(err, "invalid CephObjectZone CR %q", cephObjectZone.Name) - } - - // Start object reconciliation, updating status for this - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcilingStatus) - - // Make sure an ObjectZoneGroup is present - realmName, reconcileResponse, err := r.reconcileObjectZoneGroup(cephObjectZone) - if err != nil { - return reconcileResponse, err - } - - // Make sure zone group has been created in Ceph Cluster - reconcileResponse, err = r.reconcileCephZoneGroup(cephObjectZone, realmName) - if err != nil { - return reconcileResponse, err - } - - // Create Ceph Zone - _, err = r.createCephZone(cephObjectZone, realmName) - if err != nil { - return r.setFailedStatus(request.NamespacedName, "failed to create ceph zone", err) - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, k8sutil.ReadyStatus) - - // Return and do not requeue - logger.Debug("zone done reconciling") - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectZone) createCephZone(zone *cephv1.CephObjectZone, realmName string) (reconcile.Result, error) { - logger.Infof("creating object zone %q in zonegroup %q in realm %q", zone.Name, zone.Spec.ZoneGroup, realmName) - - realmArg := fmt.Sprintf("--rgw-realm=%s", realmName) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", zone.Spec.ZoneGroup) - zoneArg := fmt.Sprintf("--rgw-zone=%s", zone.Name) - objContext := object.NewContext(r.context, r.clusterInfo, zone.Name) - - // get zone group to see if master zone exists yet - output, err := object.RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg) - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - return reconcile.Result{}, errors.Wrapf(err, "ceph zone group %q not found", zone.Spec.ZoneGroup) - } else { - return reconcile.Result{}, errors.Wrapf(err, "radosgw-admin zonegroup get failed with code %d", code) - } - } - - // check if master zone does not exist yet for period - zoneGroupJson, err := object.DecodeZoneGroupConfig(output) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to parse `radosgw-admin zonegroup get` output") - } - - // create zone - _, err = object.RunAdminCommandNoMultisite(objContext, true, "zone", "get", realmArg, zoneGroupArg, zoneArg) - if err == nil { - logger.Debugf("ceph zone %q already exists, new zone and pools will not be created", zone.Name) - return reconcile.Result{}, nil - } - - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - logger.Debugf("ceph zone %q not found, running `radosgw-admin zone create`", zone.Name) - - zoneIsMaster := false - if zoneGroupJson.MasterZoneID == "" { - zoneIsMaster = true - } - - err = r.createPoolsAndZone(objContext, zone, realmName, zoneIsMaster) - if err != nil { - return reconcile.Result{}, err - } - } else { - return reconcile.Result{}, errors.Wrapf(err, "radosgw-admin zone get failed with code %d for reason %q", code, output) - } - - return reconcile.Result{}, nil -} - 
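
The exists-or-create flow above hinges on treating an ENOENT exit status from radosgw-admin as "not created yet" rather than as a failure. Below is a stripped-down sketch of that pattern using plain os/exec instead of the operator's helpers; the ensureZone function, its arguments, and the omission of the realm key and --master handling are simplifications for illustration, not the deleted implementation.

package example

import (
	"os/exec"
	"syscall"
)

// ensureZone runs `zone get` first; an exit status equal to ENOENT is taken
// to mean the zone does not exist yet, so `zone create` is safe to run. Any
// other failure is surfaced to the caller.
func ensureZone(realm, zoneGroup, zone string) error {
	get := exec.Command("radosgw-admin", "zone", "get",
		"--rgw-realm="+realm, "--rgw-zonegroup="+zoneGroup, "--rgw-zone="+zone)
	if err := get.Run(); err == nil {
		return nil // zone already exists, nothing to do
	} else if exitErr, ok := err.(*exec.ExitError); !ok || exitErr.ExitCode() != int(syscall.ENOENT) {
		return err // unexpected failure, do not attempt creation
	}

	create := exec.Command("radosgw-admin", "zone", "create",
		"--rgw-realm="+realm, "--rgw-zonegroup="+zoneGroup, "--rgw-zone="+zone)
	return create.Run()
}
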
-func (r *ReconcileObjectZone) createPoolsAndZone(objContext *object.Context, zone *cephv1.CephObjectZone, realmName string, zoneIsMaster bool) error { - // create pools for zone - logger.Debugf("creating pools ceph zone %q", zone.Name) - realmArg := fmt.Sprintf("--rgw-realm=%s", realmName) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", zone.Spec.ZoneGroup) - zoneArg := fmt.Sprintf("--rgw-zone=%s", zone.Name) - - err := object.CreatePools(objContext, r.clusterSpec, zone.Spec.MetadataPool, zone.Spec.DataPool) - if err != nil { - return errors.Wrapf(err, "failed to create pools for zone %v", zone.Name) - } - logger.Debugf("created pools ceph zone %q", zone.Name) - - accessKeyArg, secretKeyArg, err := object.GetRealmKeyArgs(r.context, realmName, zone.Namespace) - if err != nil { - return errors.Wrap(err, "failed to get keys for realm") - } - args := []string{"zone", "create", realmArg, zoneGroupArg, zoneArg, accessKeyArg, secretKeyArg} - - if zoneIsMaster { - // master zone does not exist yet for zone group - args = append(args, "--master") - } - - output, err := object.RunAdminCommandNoMultisite(objContext, false, args...) - if err != nil { - return errors.Wrapf(err, "failed to create ceph zone %q for reason %q", zone.Name, output) - } - logger.Debugf("created ceph zone %q", zone.Name) - - return nil -} - -func (r *ReconcileObjectZone) reconcileObjectZoneGroup(zone *cephv1.CephObjectZone) (string, reconcile.Result, error) { - // empty zoneGroup gets filled by r.client.Get() - zoneGroup := &cephv1.CephObjectZoneGroup{} - err := r.client.Get(context.TODO(), types.NamespacedName{Name: zone.Spec.ZoneGroup, Namespace: zone.Namespace}, zoneGroup) - if err != nil { - if kerrors.IsNotFound(err) { - return "", waitForRequeueIfObjectZoneGroupNotReady, err - } - return "", waitForRequeueIfObjectZoneGroupNotReady, errors.Wrapf(err, "error getting cephObjectZoneGroup %v", zone.Spec.ZoneGroup) - } - - logger.Debugf("CephObjectZoneGroup %v found", zoneGroup.Name) - return zoneGroup.Spec.Realm, reconcile.Result{}, nil -} - -func (r *ReconcileObjectZone) reconcileCephZoneGroup(zone *cephv1.CephObjectZone, realmName string) (reconcile.Result, error) { - realmArg := fmt.Sprintf("--rgw-realm=%s", realmName) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", zone.Spec.ZoneGroup) - objContext := object.NewContext(r.context, r.clusterInfo, zone.Name) - - _, err := object.RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg) - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - return waitForRequeueIfObjectZoneGroupNotReady, errors.Wrapf(err, "ceph zone group %q not found", zone.Spec.ZoneGroup) - } else { - return waitForRequeueIfObjectZoneGroupNotReady, errors.Wrapf(err, "radosgw-admin zonegroup get failed with code %d", code) - } - } - - logger.Infof("Zone group %q found in Ceph cluster to create ceph zone %q", zone.Spec.ZoneGroup, zone.Name) - return reconcile.Result{}, nil -} - -// validateZoneCR validates the zone arguments -func (r *ReconcileObjectZone) validateZoneCR(z *cephv1.CephObjectZone) error { - if z.Name == "" { - return errors.New("missing name") - } - if z.Namespace == "" { - return errors.New("missing namespace") - } - if z.Spec.ZoneGroup == "" { - return errors.New("missing zonegroup") - } - if err := pool.ValidatePoolSpec(r.context, r.clusterInfo, r.clusterSpec, &z.Spec.MetadataPool); err != nil { - return errors.Wrap(err, "invalid metadata pool spec") - } - if err := pool.ValidatePoolSpec(r.context, r.clusterInfo, 
r.clusterSpec, &z.Spec.DataPool); err != nil { - return errors.Wrap(err, "invalid data pool spec") - } - return nil -} - -func (r *ReconcileObjectZone) setFailedStatus(name types.NamespacedName, errMessage string, err error) (reconcile.Result, error) { - updateStatus(r.client, name, k8sutil.ReconcileFailedStatus) - return reconcile.Result{}, errors.Wrapf(err, "%s", errMessage) -} - -// updateStatus updates an zone with a given status -func updateStatus(client client.Client, name types.NamespacedName, status string) { - objectZone := &cephv1.CephObjectZone{} - if err := client.Get(context.TODO(), name, objectZone); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectZone resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve object zone %q to update status to %q. %v", name, status, err) - return - } - if objectZone.Status == nil { - objectZone.Status = &cephv1.Status{} - } - - objectZone.Status.Phase = status - if err := reporting.UpdateStatus(client, objectZone); err != nil { - logger.Errorf("failed to set object zone %q status to %q. %v", name, status, err) - return - } - logger.Debugf("object zone %q status updated to %q", name, status) -} diff --git a/pkg/operator/ceph/object/zone/controller_test.go b/pkg/operator/ceph/object/zone/controller_test.go deleted file mode 100644 index 4d2a01991..000000000 --- a/pkg/operator/ceph/object/zone/controller_test.go +++ /dev/null @@ -1,362 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package zone to manage a rook object zone. 
-package zone - -import ( - "context" - "testing" - "time" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/operator/test" - - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - zoneGroupGetJSON = `{ - "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", - "name": "zonegroup-a", - "api_name": "zonegroup-a", - "is_master": "true", - "endpoints": [ - ":80" - ], - "hostnames": [], - "hostnames_s3website": [], - "master_zone": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "zones": [ - { - "id": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "name": "zone-group", - "endpoints": [ - ":80" - ], - "log_meta": "false", - "log_data": "false", - "bucket_index_max_shards": 0, - "read_only": "false", - "tier_type": "", - "sync_from_all": "true", - "sync_from": [], - "redirect_zone": "" - } - ], - "placement_targets": [ - { - "name": "default-placement", - "tags": [], - "storage_classes": [ - "STANDARD" - ] - } - ], - "default_placement": "default-placement", - "realm_id": "237e6250-5f7d-4b85-9359-8cb2b1848507" - }` - zoneGetOutput = `{"id": "test-id"}` - zoneCreateJSON = `{ - "id": "b1abbebb-e8ae-4c3b-880e-b009728bad53", - "name": "zone-a", - "domain_root": "zone-a.rgw.meta:root", - "control_pool": "zone-a.rgw.control", - "gc_pool": "zone-a.rgw.log:gc", - "lc_pool": "zone-a.rgw.log:lc", - "log_pool": "zone-a.rgw.log", - "intent_log_pool": "zone-a.rgw.log:intent", - "usage_log_pool": "zone-a.rgw.log:usage", - "roles_pool": "zone-a.rgw.meta:roles", - "reshard_pool": "zone-a.rgw.log:reshard", - "user_keys_pool": "zone-a.rgw.meta:users.keys", - "user_email_pool": "zone-a.rgw.meta:users.email", - "user_swift_pool": "zone-a.rgw.meta:users.swift", - "user_uid_pool": "zone-a.rgw.meta:users.uid", - "otp_pool": "zone-a.rgw.otp", - "system_key": { - "access_key": "", - "secret_key": "" - }, - "placement_pools": [ - { - "key": "default-placement", - "val": { - "index_pool": "zone-a.rgw.buckets.index", - "storage_classes": { - "STANDARD": { - "data_pool": "zone-a.rgw.buckets.data" - } - }, - "data_extra_pool": "zone-a.rgw.buckets.non-ec", - "index_type": 0 - } - } - ], - "realm_id": "91b799b2-857d-4c96-8ade-5ceff7c8597e" - }` -) - -func TestCephObjectZoneController(t *testing.T) { - ctx := context.TODO() - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - name := "zone-a" - zonegroup := "zonegroup-a" - namespace := "rook-ceph" - - // - // TEST 1 SETUP - // - // FAILURE: because no CephCluster - // - // A Pool resource with metadata and spec. - metadataPool := cephv1.PoolSpec{} - dataPool := cephv1.PoolSpec{} - objectZone := &cephv1.CephObjectZone{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectZone", - }, - Spec: cephv1.ObjectZoneSpec{ - ZoneGroup: zonegroup, - MetadataPool: metadataPool, - DataPool: dataPool, - }, - } - - // Objects to track in the fake client. 
- object := []runtime.Object{ - objectZone, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - } - - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectZone{}, &cephv1.CephObjectZoneList{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileObjectZone object with the scheme and fake client. - clusterInfo := cephclient.AdminClusterInfo("rook") - - r := &ReconcileObjectZone{client: cl, scheme: s, context: c, clusterInfo: clusterInfo} - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - - // - // TEST 2: - // - // FAILURE: we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - - object = []runtime.Object{ - objectZone, - cephCluster, - } - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectZone{}, &cephv1.CephObjectZoneList{}, &cephv1.CephCluster{}, &cephv1.CephClusterList{}) - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileObjectZone object with the scheme and fake client. - r = &ReconcileObjectZone{client: cl, scheme: r.scheme, context: r.context} - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - - // - // TEST 3: - // - // Failure: The CephCluster is ready but no ObjectZoneGroup has been created - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = r.context.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. 
- cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "zonegroup" && args[1] == "get" { - return zoneGroupGetJSON, nil - } - return "", nil - }, - } - r.context.Executor = executor - - r = &ReconcileObjectZone{client: cl, scheme: r.scheme, context: r.context} - - res, err = r.Reconcile(ctx, req) - assert.Error(t, err) - assert.True(t, res.Requeue) - - // - // TEST 4: - // - // Success: The CephCluster is ready and ObjectZone has been created - // - - objectZoneGroup := &cephv1.CephObjectZoneGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: zonegroup, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectZoneGroup", - }, - Spec: cephv1.ObjectZoneGroupSpec{}, - } - - // Objects to track in the fake client. - object = []runtime.Object{ - objectZone, - objectZoneGroup, - cephCluster, - } - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "zonegroup" && args[1] == "get" { - return zoneGroupGetJSON, nil - } - if args[0] == "zone" && args[1] == "get" { - return zoneGetOutput, nil - } - if args[0] == "zone" && args[1] == "create" { - return zoneCreateJSON, nil - } - return "", nil - }, - } - - clientset = test.New(t, 3) - c = &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectZoneGroup{}, &cephv1.CephObjectZoneGroupList{}, &cephv1.CephCluster{}, &cephv1.CephClusterList{}, &cephv1.CephObjectZone{}, &cephv1.CephObjectZoneList{}) - - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - r = &ReconcileObjectZone{client: cl, scheme: s, context: c, clusterInfo: clusterInfo} - - err = r.client.Get(context.TODO(), types.NamespacedName{Name: zonegroup, Namespace: namespace}, objectZoneGroup) - assert.NoError(t, err, objectZoneGroup) - - req = reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, objectZone) - assert.NoError(t, err) -} diff --git a/pkg/operator/ceph/object/zonegroup/controller.go b/pkg/operator/ceph/object/zonegroup/controller.go deleted file mode 100644 index 34c5a3f90..000000000 --- a/pkg/operator/ceph/object/zonegroup/controller.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package objectzonegroup to manage a rook object zonegroup. -package zonegroup - -import ( - "context" - "fmt" - "reflect" - "syscall" - "time" - - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/reporting" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - controllerName = "ceph-object-zonegroup-controller" -) - -var waitForRequeueIfObjectRealmNotReady = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -var cephObjectZoneGroupKind = reflect.TypeOf(cephv1.CephObjectZoneGroup{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephObjectZoneGroupKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -// ReconcileObjectZoneGroup reconciles a ObjectZoneGroup object -type ReconcileObjectZoneGroup struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo -} - -// Add creates a new CephObjectZoneGroup Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
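
As in the zone controller, the Kind string and TypeMeta above are derived mechanically from the API type rather than hard-coded. A small sketch of that derivation follows; the empty CephObjectZoneGroup struct stands in for the real API type, and the group/version strings are assumed values for the sketch.

package main

import (
	"fmt"
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// CephObjectZoneGroup is a hypothetical stand-in for the CRD type, used only
// to show how the Kind and APIVersion are assembled.
type CephObjectZoneGroup struct{}

func main() {
	kind := reflect.TypeOf(CephObjectZoneGroup{}).Name() // "CephObjectZoneGroup"
	typeMeta := metav1.TypeMeta{
		Kind:       kind,
		APIVersion: fmt.Sprintf("%s/%s", "ceph.rook.io", "v1"), // group/version assumed for the sketch
	}
	fmt.Printf("watching %s (%s)\n", typeMeta.Kind, typeMeta.APIVersion)
}
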
-func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - return &ReconcileObjectZoneGroup{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephObjectZoneGroup CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephObjectZoneGroup{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephObjectZoneGroup object and makes changes based on the state read -// and what is in the CephObjectZoneGroup.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileObjectZoneGroup) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - logger.Errorf("failed to reconcile: %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileObjectZoneGroup) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the CephObjectZoneGroup instance - cephObjectZoneGroup := &cephv1.CephObjectZoneGroup{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephObjectZoneGroup) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectZoneGroup resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, errors.Wrap(err, "failed to get CephObjectZoneGroup") - } - - // The CR was just created, initializing status fields - if cephObjectZoneGroup.Status == nil { - updateStatus(r.client, request.NamespacedName, k8sutil.EmptyStatus) - } - - // Make sure a CephCluster is present otherwise do nothing - _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - // This handles the case where the Ceph Cluster is gone and we want to delete that CR - if !cephObjectZoneGroup.GetDeletionTimestamp().IsZero() && !cephClusterExists { - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, nil - } - return reconcileResponse, nil - } - - // DELETE: the CR was deleted - if !cephObjectZoneGroup.GetDeletionTimestamp().IsZero() { - logger.Debugf("deleting zone group CR %q", cephObjectZoneGroup.Name) - - // Return and do not requeue. Successful deletion. 
- return reconcile.Result{}, nil - } - - // Populate clusterInfo during each reconcile - r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info") - } - - // validate the zone group settings - err = validateZoneGroup(cephObjectZoneGroup) - if err != nil { - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcileFailedStatus) - return reconcile.Result{}, errors.Wrapf(err, "invalid CephObjectZoneGroup CR %q", cephObjectZoneGroup.Name) - } - - // Start object reconciliation, updating status for this - updateStatus(r.client, request.NamespacedName, k8sutil.ReconcilingStatus) - - // Make sure an ObjectRealm Resource is present - reconcileResponse, err = r.reconcileObjectRealm(cephObjectZoneGroup) - if err != nil { - return reconcileResponse, err - } - - // Make sure Realm has been created in Ceph Cluster - reconcileResponse, err = r.reconcileCephRealm(cephObjectZoneGroup) - if err != nil { - return reconcileResponse, err - } - - // Create/Update Ceph Zone Group - _, err = r.createCephZoneGroup(cephObjectZoneGroup) - if err != nil { - return r.setFailedStatus(request.NamespacedName, "failed to create ceph zone group", err) - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, k8sutil.ReadyStatus) - - // Return and do not requeue - logger.Debug("zone group done reconciling") - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectZoneGroup) createCephZoneGroup(zoneGroup *cephv1.CephObjectZoneGroup) (reconcile.Result, error) { - logger.Infof("creating object zone group %q in realm %q", zoneGroup.Name, zoneGroup.Spec.Realm) - - realmArg := fmt.Sprintf("--rgw-realm=%s", zoneGroup.Spec.Realm) - zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", zoneGroup.Name) - objContext := object.NewContext(r.context, r.clusterInfo, zoneGroup.Name) - - // get period to see if master zone group exists yet - output, err := object.RunAdminCommandNoMultisite(objContext, true, "period", "get", realmArg) - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - return reconcile.Result{}, errors.Wrapf(err, "ceph period %q not found", zoneGroup.Spec.Realm) - } else { - return reconcile.Result{}, errors.Wrapf(err, "radosgw-admin period get failed with code %d", code) - } - } - - // check if master zone group does not exist yet for period - masterZoneGroup, err := decodeMasterZoneGroup(output) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to parse `radosgw-admin period get` output") - } - - zoneGroupIsMaster := false - if masterZoneGroup == "" { - zoneGroupIsMaster = true - } - - // create zone group - output, err = object.RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg) - if err == nil { - return reconcile.Result{}, nil - } - - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - logger.Debugf("ceph zone group %q not found, running `radosgw-admin zonegroup create`", zoneGroup.Name) - args := []string{ - "zonegroup", - "create", - realmArg, - zoneGroupArg, - } - - if zoneGroupIsMaster { - // master zone group does not exist yet for realm - args = append(args, "--master") - } - - output, err = object.RunAdminCommandNoMultisite(objContext, false, args...) 
- if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to create ceph zone group %q for reason %q", zoneGroup.Name, output) - } - } else { - return reconcile.Result{}, errors.Wrapf(err, "radosgw-admin zonegroup get failed with code %d for reason %q", code, output) - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectZoneGroup) reconcileObjectRealm(zoneGroup *cephv1.CephObjectZoneGroup) (reconcile.Result, error) { - // Verify the object realm API object actually exists - cephObjectRealm := &cephv1.CephObjectRealm{} - err := r.client.Get(context.TODO(), types.NamespacedName{Name: zoneGroup.Spec.Realm, Namespace: zoneGroup.Namespace}, cephObjectRealm) - if err != nil { - if kerrors.IsNotFound(err) { - return waitForRequeueIfObjectRealmNotReady, errors.Wrapf(err, "realm %q not found", zoneGroup.Spec.Realm) - } - return waitForRequeueIfObjectRealmNotReady, errors.Wrapf(err, "error finding CephObjectRealm %s", zoneGroup.Spec.Realm) - } - - logger.Infof("CephObjectRealm %q found for CephObjectZoneGroup %q", zoneGroup.Spec.Realm, zoneGroup.Name) - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectZoneGroup) reconcileCephRealm(zoneGroup *cephv1.CephObjectZoneGroup) (reconcile.Result, error) { - realmArg := fmt.Sprintf("--rgw-realm=%s", zoneGroup.Spec.Realm) - objContext := object.NewContext(r.context, r.clusterInfo, zoneGroup.Name) - - _, err := object.RunAdminCommandNoMultisite(objContext, true, "realm", "get", realmArg) - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - return waitForRequeueIfObjectRealmNotReady, errors.Wrapf(err, "ceph realm %q not found", zoneGroup.Spec.Realm) - } else { - return waitForRequeueIfObjectRealmNotReady, errors.Wrapf(err, "radosgw-admin realm get failed with code %d", code) - } - } - - logger.Infof("Realm %q found in Ceph cluster to create ceph zone group %q", zoneGroup.Spec.Realm, zoneGroup.Name) - return reconcile.Result{}, nil -} - -func (r *ReconcileObjectZoneGroup) setFailedStatus(name types.NamespacedName, errMessage string, err error) (reconcile.Result, error) { - updateStatus(r.client, name, k8sutil.ReconcileFailedStatus) - return reconcile.Result{}, errors.Wrapf(err, "%s", errMessage) -} - -// updateStatus updates an zone group with a given status -func updateStatus(client client.Client, name types.NamespacedName, status string) { - objectZoneGroup := &cephv1.CephObjectZoneGroup{} - if err := client.Get(context.TODO(), name, objectZoneGroup); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephObjectZoneGroup resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve object zone group %q to update status to %q. %v", name, status, err) - return - } - if objectZoneGroup.Status == nil { - objectZoneGroup.Status = &cephv1.Status{} - } - - objectZoneGroup.Status.Phase = status - if err := reporting.UpdateStatus(client, objectZoneGroup); err != nil { - logger.Errorf("failed to set object zone group %q status to %q. %v", name, status, err) - return - } - logger.Debugf("object zone group %q status updated to %q", name, status) -} diff --git a/pkg/operator/ceph/object/zonegroup/controller_test.go b/pkg/operator/ceph/object/zonegroup/controller_test.go deleted file mode 100644 index 897c78041..000000000 --- a/pkg/operator/ceph/object/zonegroup/controller_test.go +++ /dev/null @@ -1,364 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package zonegroup to manage a rook object zone group. -package zonegroup - -import ( - "context" - "testing" - "time" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/operator/test" - - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var ( - name = "zonegroup-a" - realm = "realm-a" - namespace = "rook-ceph" - realmGetJSON = `{ - "id": "237e6250-5f7d-4b85-9359-8cb2b1848507", - "name": "realm-a", - "current_period": "df665ecb-1762-47a9-9c66-f938d251c02a", - "epoch": 2 - }` - periodGetJSON = `{ - "id": "3ee7933c-da65-4255-a5a6-f7348aaf2ece", - "epoch": 1, - "predecessor_uuid": "", - "sync_status": [], - "period_map": { - "id": "3ee7933c-da65-4255-a5a6-f7348aaf2ece", - "zonegroups": [], - "short_zone_ids": [] - }, - "master_zonegroup": "", - "master_zone": "", - "period_config": { - "bucket_quota": { - "enabled": false, - "check_on_raw": false, - "max_size": -1, - "max_size_kb": 0, - "max_objects": -1 - }, - "user_quota": { - "enabled": false, - "check_on_raw": false, - "max_size": -1, - "max_size_kb": 0, - "max_objects": -1 - } - }, - "realm_id": "ef13e399-a981-4138-a37c-e2ae05a06532", - "realm_name": "realm-a", - "realm_epoch": 1 - }` - zoneGroupGetJSON = `{ - "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", - "name": "zonegroup-a", - "api_name": "zonegroup-a", - "is_master": "true", - "endpoints": [ - ":80" - ], - "hostnames": [], - "hostnames_s3website": [], - "master_zone": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "zones": [ - { - "id": "6cb39d2c-3005-49da-9be3-c1a92a97d28a", - "name": "zone-group", - "endpoints": [ - ":80" - ], - "log_meta": "false", - "log_data": "false", - "bucket_index_max_shards": 0, - "read_only": "false", - "tier_type": "", - "sync_from_all": "true", - "sync_from": [], - "redirect_zone": "" - } - ], - "placement_targets": [ - { - "name": "default-placement", - "tags": [], - "storage_classes": [ - "STANDARD" - ] - } - ], - "default_placement": "default-placement", - "realm_id": "237e6250-5f7d-4b85-9359-8cb2b1848507" - }` -) - -func TestCephObjectZoneGroupController(t *testing.T) { - ctx := context.TODO() - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - // - // TEST 1 SETUP - // - // FAILURE: because no CephCluster - // - // A Pool resource with metadata and spec. 
- objectZoneGroup := &cephv1.CephObjectZoneGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectZoneGroup", - }, - Spec: cephv1.ObjectZoneGroupSpec{ - Realm: realm, - }, - } - - // Objects to track in the fake client. - object := []runtime.Object{ - objectZoneGroup, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - } - - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectZoneGroup{}, &cephv1.CephObjectZoneGroupList{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileObjectZoneGroup object with the scheme and fake client. - clusterInfo := cephclient.AdminClusterInfo("rook") - - r := &ReconcileObjectZoneGroup{client: cl, scheme: s, context: c, clusterInfo: clusterInfo} - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - - // - // TEST 2: - // - // FAILURE: we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - - object = []runtime.Object{ - objectZoneGroup, - cephCluster, - } - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectZoneGroup{}, &cephv1.CephObjectZoneGroupList{}, &cephv1.CephCluster{}, &cephv1.CephClusterList{}) - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileObjectZoneGroup object with the scheme and fake client. - r = &ReconcileObjectZoneGroup{client: cl, scheme: r.scheme, context: r.context} - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - - // - // TEST 3: - // - // Failure: The CephCluster is ready but no ObjectRealm has been created - // - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = r.context.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Create a fake client to mock API calls. 
- cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "realm" && args[1] == "get" { - return realmGetJSON, nil - } - if args[0] == "zonegroup" && args[1] == "get" { - return zoneGroupGetJSON, nil - } - return "", nil - }, - } - r.context.Executor = executor - - // Create a ReconcileObjectZoneGroup - r = &ReconcileObjectZoneGroup{client: cl, scheme: r.scheme, context: r.context} - - res, err = r.Reconcile(ctx, req) - assert.Error(t, err) - assert.True(t, res.Requeue) - - // - // TEST 4: - // - // Success: The CephCluster is ready and ObjectRealm has been created - // - - objectRealm := &cephv1.CephObjectRealm{ - ObjectMeta: metav1.ObjectMeta{ - Name: realm, - Namespace: namespace, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CephObjectRealm", - }, - Spec: cephv1.ObjectRealmSpec{}, - } - - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - // Objects to track in the fake client. - object = []runtime.Object{ - objectZoneGroup, - objectRealm, - cephCluster, - } - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - return "", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "zonegroup" && args[1] == "get" { - return zoneGroupGetJSON, nil - } - if args[0] == "period" && args[1] == "get" { - return periodGetJSON, nil - } - if args[0] == "realm" && args[1] == "get" { - return realmGetJSON, nil - } - return "", nil - }, - } - - clientset = test.New(t, 3) - c = &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectZoneGroup{}, &cephv1.CephObjectZoneGroupList{}, &cephv1.CephCluster{}, &cephv1.CephClusterList{}, &cephv1.CephObjectRealm{}, &cephv1.CephObjectRealmList{}) - - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - r = &ReconcileObjectZoneGroup{client: cl, scheme: s, context: c, clusterInfo: clusterInfo} - - err = r.client.Get(context.TODO(), types.NamespacedName{Name: realm, Namespace: namespace}, objectRealm) - assert.NoError(t, err, objectRealm) - - req = reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, objectZoneGroup) - assert.NoError(t, err) -} diff --git a/pkg/operator/ceph/object/zonegroup/zonegroup.go 
b/pkg/operator/ceph/object/zonegroup/zonegroup.go deleted file mode 100644 index 27b1b709e..000000000 --- a/pkg/operator/ceph/object/zonegroup/zonegroup.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package zonegroup - -import ( - "encoding/json" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" -) - -type masterZoneGroupType struct { - MasterZoneGroup string `json:"master_zonegroup"` -} - -func decodeMasterZoneGroup(data string) (string, error) { - var periodGet masterZoneGroupType - err := json.Unmarshal([]byte(data), &periodGet) - if err != nil { - return "", errors.Wrap(err, "Failed to unmarshal json") - } - - return periodGet.MasterZoneGroup, err -} - -// validateZoneGroup validates the zonegroup arguments -func validateZoneGroup(u *cephv1.CephObjectZoneGroup) error { - if u.Name == "" { - return errors.New("missing name") - } - if u.Namespace == "" { - return errors.New("missing namespace") - } - if u.Spec.Realm == "" { - return errors.New("missing realm") - } - return nil -} diff --git a/pkg/operator/ceph/operator.go b/pkg/operator/ceph/operator.go deleted file mode 100644 index 334afcdf2..000000000 --- a/pkg/operator/ceph/operator.go +++ /dev/null @@ -1,303 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package operator to manage Kubernetes storage. 
-package operator - -import ( - "context" - "os" - "os/signal" - "strings" - "syscall" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - "github.com/rook/rook/pkg/operator/ceph/agent" - "github.com/rook/rook/pkg/operator/ceph/cluster" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/csi" - "github.com/rook/rook/pkg/operator/ceph/provisioner" - "github.com/rook/rook/pkg/operator/discover" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" -) - -// volume provisioner constant -const ( - provisionerName = "ceph.rook.io/block" - provisionerNameLegacy = "rook.io/block" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "operator") - - // The supported configurations for the volume provisioner - provisionerConfigs = map[string]string{ - provisionerName: flexvolume.FlexvolumeVendor, - provisionerNameLegacy: flexvolume.FlexvolumeVendorLegacy, - } - - // ImmediateRetryResult Return this for a immediate retry of the reconciliation loop with the same request object. - ImmediateRetryResult = reconcile.Result{Requeue: true} - - // Signals to watch for to terminate the operator gracefully - // Using os.Interrupt is more portable across platforms instead of os.SIGINT - shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} -) - -// Operator type for managing storage -type Operator struct { - context *clusterd.Context - resources []k8sutil.CustomResource - operatorNamespace string - rookImage string - securityAccount string - // The custom resource that is global to the kubernetes cluster. - // The cluster is global because you create multiple clusters in k8s - clusterController *cluster.ClusterController - delayedDaemonsStarted bool -} - -// New creates an operator instance -func New(context *clusterd.Context, volumeAttachmentWrapper attachment.Attachment, rookImage, securityAccount string) *Operator { - schemes := []k8sutil.CustomResource{opcontroller.ClusterResource, attachment.VolumeResource} - - operatorNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - o := &Operator{ - context: context, - resources: schemes, - operatorNamespace: operatorNamespace, - rookImage: rookImage, - securityAccount: securityAccount, - } - operatorConfigCallbacks := []func() error{ - o.updateDrivers, - o.updateOperatorLogLevel, - } - addCallbacks := []func() error{ - o.startDrivers, - } - o.clusterController = cluster.NewClusterController(context, rookImage, volumeAttachmentWrapper, operatorConfigCallbacks, addCallbacks) - return o -} - -func (o *Operator) cleanup(stopCh chan struct{}) { - close(stopCh) - o.clusterController.StopWatch() -} - -func (o *Operator) updateOperatorLogLevel() error { - rookLogLevel, err := k8sutil.GetOperatorSetting(o.context.Clientset, opcontroller.OperatorSettingConfigMapName, "ROOK_LOG_LEVEL", "INFO") - if err != nil { - logger.Warningf("failed to load ROOK_LOG_LEVEL. Defaulting to INFO. 
%v", err) - rookLogLevel = "INFO" - } - - logLevel, err := capnslog.ParseLevel(strings.ToUpper(rookLogLevel)) - if err != nil { - return errors.Wrapf(err, "failed to load ROOK_LOG_LEVEL %q.", rookLogLevel) - } - - capnslog.SetGlobalLogLevel(logLevel) - return nil -} - -// Run the operator instance -func (o *Operator) Run() error { - - if o.operatorNamespace == "" { - return errors.Errorf("rook operator namespace is not provided. expose it via downward API in the rook operator manifest file using environment variable %q", k8sutil.PodNamespaceEnvVar) - } - - opcontroller.SetCephCommandsTimeout(o.context) - - // Initialize signal handler and context - stopContext, stopFunc := signal.NotifyContext(context.Background(), shutdownSignals...) - defer stopFunc() - - rookDiscover := discover.New(o.context.Clientset) - if opcontroller.DiscoveryDaemonEnabled(o.context) { - if err := rookDiscover.Start(o.operatorNamespace, o.rookImage, o.securityAccount, true); err != nil { - return errors.Wrap(err, "failed to start device discovery daemonset") - } - } else { - if err := rookDiscover.Stop(stopContext, o.operatorNamespace); err != nil { - return errors.Wrap(err, "failed to stop device discovery daemonset") - } - } - - logger.Debug("checking for admission controller secrets") - err := StartControllerIfSecretPresent(stopContext, o.context, o.rookImage) - if err != nil { - return errors.Wrap(err, "failed to start webhook") - } - serverVersion, err := o.context.Clientset.Discovery().ServerVersion() - if err != nil { - return errors.Wrap(err, "failed to get server version") - } - - // Initialize stop channel for watchers - stopChan := make(chan struct{}) - - // For Flex Driver, run volume provisioner for each of the supported configurations - if opcontroller.FlexDriverEnabled(o.context) { - for name, vendor := range provisionerConfigs { - volumeProvisioner := provisioner.New(o.context, vendor) - pc := controller.NewProvisionController( - o.context.Clientset, - name, - volumeProvisioner, - serverVersion.GitVersion, - ) - go pc.Run(stopContext) - logger.Infof("rook-provisioner %q started using %q flex vendor dir", name, vendor) - } - } - - var namespaceToWatch string - if os.Getenv("ROOK_CURRENT_NAMESPACE_ONLY") == "true" { - logger.Infof("watching the current namespace for a ceph cluster CR") - namespaceToWatch = o.operatorNamespace - } else { - logger.Infof("watching all namespaces for ceph cluster CRs") - namespaceToWatch = v1.NamespaceAll - } - - // Start the controller-runtime Manager. - mgrErrorChan := make(chan error) - go o.startManager(namespaceToWatch, stopContext, mgrErrorChan) - - // Start the operator setting watcher - go o.clusterController.StartOperatorSettingsWatch(stopChan) - - // Signal handler to stop the operator - for { - select { - case <-stopContext.Done(): - logger.Infof("shutdown signal received, exiting... %v", stopContext.Err()) - o.cleanup(stopChan) - return nil - case err := <-mgrErrorChan: - logger.Errorf("gave up to run the operator. 
%v", err) - o.cleanup(stopChan) - return err - } - } -} - -func (o *Operator) startDrivers() error { - if o.delayedDaemonsStarted { - return nil - } - - o.delayedDaemonsStarted = true - if err := o.updateDrivers(); err != nil { - o.delayedDaemonsStarted = false // unset because failed to updateDrivers - return err - } - - return nil -} - -func (o *Operator) updateDrivers() error { - var err error - - // Skipping CSI driver update since the first cluster hasn't been started yet - if !o.delayedDaemonsStarted { - return nil - } - - if o.operatorNamespace == "" { - return errors.Errorf("rook operator namespace is not provided. expose it via downward API in the rook operator manifest file using environment variable %s", k8sutil.PodNamespaceEnvVar) - } - - if opcontroller.FlexDriverEnabled(o.context) { - rookAgent := agent.New(o.context.Clientset) - if err := rookAgent.Start(o.operatorNamespace, o.rookImage, o.securityAccount); err != nil { - return errors.Wrap(err, "error starting agent daemonset") - } - } - - serverVersion, err := o.context.Clientset.Discovery().ServerVersion() - if err != nil { - return errors.Wrap(err, "error getting server version") - } - - if serverVersion.Major < csi.KubeMinMajor || serverVersion.Major == csi.KubeMinMajor && serverVersion.Minor < csi.ProvDeploymentSuppVersion { - logger.Infof("CSI drivers only supported in K8s 1.14 or newer. version=%s", serverVersion.String()) - // disable csi control variables to disable other csi functions - csi.EnableRBD = false - csi.EnableCephFS = false - return nil - } - - ownerRef, err := getDeploymentOwnerReference(o.context.Clientset, o.operatorNamespace) - if err != nil { - logger.Warningf("could not find deployment owner reference to assign to csi drivers. %v", err) - } - if ownerRef != nil { - blockOwnerDeletion := false - ownerRef.BlockOwnerDeletion = &blockOwnerDeletion - } - - ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(ownerRef, o.operatorNamespace) - // create an empty config map. 
config map will be filled with data - // later when clusters have mons - err = csi.CreateCsiConfigMap(o.operatorNamespace, o.context.Clientset, ownerInfo) - if err != nil { - return errors.Wrap(err, "failed creating csi config map") - } - - go csi.ValidateAndConfigureDrivers(o.context, o.operatorNamespace, o.rookImage, o.securityAccount, serverVersion, ownerInfo) - return nil -} - -// getDeploymentOwnerReference returns an OwnerReference to the rook-ceph-operator deployment -func getDeploymentOwnerReference(clientset kubernetes.Interface, namespace string) (*metav1.OwnerReference, error) { - ctx := context.TODO() - var deploymentRef *metav1.OwnerReference - podName := os.Getenv(k8sutil.PodNameEnvVar) - pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "could not find pod %q to find deployment owner reference", podName) - } - for _, podOwner := range pod.OwnerReferences { - if podOwner.Kind == "ReplicaSet" { - replicaset, err := clientset.AppsV1().ReplicaSets(namespace).Get(ctx, podOwner.Name, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "could not find replicaset %q to find deployment owner reference", podOwner.Name) - } - for _, replicasetOwner := range replicaset.OwnerReferences { - if replicasetOwner.Kind == "Deployment" { - localreplicasetOwner := replicasetOwner - deploymentRef = &localreplicasetOwner - } - } - } - } - if deploymentRef == nil { - return nil, errors.New("could not find owner reference for rook-ceph deployment") - } - return deploymentRef, nil -} diff --git a/pkg/operator/ceph/operator_test.go b/pkg/operator/ceph/operator_test.go deleted file mode 100644 index 43e57e3ce..000000000 --- a/pkg/operator/ceph/operator_test.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operator - -import ( - "fmt" - "testing" - - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" -) - -func TestOperator(t *testing.T) { - clientset := test.New(t, 3) - context := &clusterd.Context{Clientset: clientset} - o := New(context, &attachment.MockAttachment{}, "", "") - - assert.NotNil(t, o) - assert.NotNil(t, o.clusterController) - assert.NotNil(t, o.resources) - assert.Equal(t, context, o.context) - assert.Equal(t, len(o.resources), 2) - for _, r := range o.resources { - if r.Name != opcontroller.ClusterResource.Name && r.Name != attachment.VolumeResource.Name { - assert.Fail(t, fmt.Sprintf("Resource %s is not valid", r.Name)) - } - } -} diff --git a/pkg/operator/ceph/pool/controller.go b/pkg/operator/ceph/pool/controller.go deleted file mode 100644 index 30c9d2ea9..000000000 --- a/pkg/operator/ceph/pool/controller.go +++ /dev/null @@ -1,410 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package pool to manage a rook pool. -package pool - -import ( - "context" - "fmt" - "reflect" - "strings" - - "github.com/coreos/pkg/capnslog" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" - "github.com/rook/rook/pkg/operator/ceph/config" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - poolApplicationNameRBD = "rbd" - controllerName = "ceph-block-pool-controller" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) - -var cephBlockPoolKind = reflect.TypeOf(cephv1.CephBlockPool{}).Name() - -// Sets the type meta for the controller main object -var controllerTypeMeta = metav1.TypeMeta{ - Kind: cephBlockPoolKind, - APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), -} - -var _ reconcile.Reconciler = &ReconcileCephBlockPool{} - -// ReconcileCephBlockPool reconciles a CephBlockPool object -type ReconcileCephBlockPool struct { - client client.Client - scheme *runtime.Scheme - context *clusterd.Context - clusterInfo *cephclient.ClusterInfo - blockPoolChannels map[string]*blockPoolHealth -} - -type blockPoolHealth struct { - stopChan chan struct{} - monitoringRunning bool -} - -// Add creates a new CephBlockPool Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
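
The blockPoolChannels map above gives each CephBlockPool its own stop channel so that per-pool mirror monitoring can be cancelled when that pool's CR is deleted. The compact sketch below mirrors that bookkeeping; the tracker type, the mutex, and the method names are invented for illustration (the deleted code updates the map directly from its reconcile loop).

package example

import (
	"fmt"
	"sync"
)

// poolHealth holds the stop channel for one pool's monitoring goroutine.
type poolHealth struct {
	stopChan chan struct{}
}

// tracker keeps one poolHealth per pool, keyed by "<namespace>-<name>".
type tracker struct {
	mu    sync.Mutex
	pools map[string]*poolHealth
}

// ensure returns the entry for a pool, creating it on first use.
func (t *tracker) ensure(namespace, name string) *poolHealth {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.pools == nil {
		t.pools = map[string]*poolHealth{}
	}
	key := fmt.Sprintf("%s-%s", namespace, name)
	if p, ok := t.pools[key]; ok {
		return p
	}
	p := &poolHealth{stopChan: make(chan struct{})}
	t.pools[key] = p
	return p
}

// cancel closes the pool's stop channel, signalling its monitoring goroutine
// to exit, and forgets the entry.
func (t *tracker) cancel(namespace, name string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	key := fmt.Sprintf("%s-%s", namespace, name)
	if p, ok := t.pools[key]; ok {
		close(p.stopChan)
		delete(t.pools, key)
	}
}
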
-func Add(mgr manager.Manager, context *clusterd.Context) error { - return add(mgr, newReconciler(mgr, context)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler { - // Add the cephv1 scheme to the manager scheme so that the controller knows about it - mgrScheme := mgr.GetScheme() - if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil { - panic(err) - } - return &ReconcileCephBlockPool{ - client: mgr.GetClient(), - scheme: mgrScheme, - context: context, - blockPoolChannels: make(map[string]*blockPoolHealth), - } -} - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - logger.Info("successfully started") - - // Watch for changes on the CephBlockPool CRD object - err = c.Watch(&source.Kind{Type: &cephv1.CephBlockPool{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate()) - if err != nil { - return err - } - - // Build Handler function to return the list of ceph block pool - // This is used by the watchers below - handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephBlockPoolList{}, mgr.GetScheme()) - if err != nil { - return err - } - - // Watch for ConfigMap "rook-ceph-mon-endpoints" update and reconcile, which will reconcile update the bootstrap peer token - err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: corev1.SchemeGroupVersion.String()}}}, handler.EnqueueRequestsFromMapFunc(handlerFunc), mon.PredicateMonEndpointChanges()) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a CephBlockPool object and makes changes based on the state read -// and what is in the CephBlockPool.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileCephBlockPool) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { - // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface - reconcileResponse, err := r.reconcile(request) - if err != nil { - logger.Errorf("failed to reconcile. %v", err) - } - - return reconcileResponse, err -} - -func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the CephBlockPool instance - cephBlockPool := &cephv1.CephBlockPool{} - err := r.client.Get(context.TODO(), request.NamespacedName, cephBlockPool) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephBlockPool resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. 
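The deleted Reconcile/reconcile pair follows the standard controller-runtime contract: swallow NotFound (the CR is gone), return a non-nil error to requeue with backoff, or return an explicit Result to be reconciled again later. Below is a generic sketch of that contract using a ConfigMap as a stand-in for the custom resource; it illustrates the pattern only, not the Rook reconciler.

```go
package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// exampleReconciler demonstrates the requeue contract used by the pool controller:
// return (Result{}, nil) when done, a non-nil error to requeue with backoff,
// or Result{RequeueAfter: ...} to poll until an external condition is met.
type exampleReconciler struct {
	client client.Client
}

func (r *exampleReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// A ConfigMap stands in for the watched custom resource.
	obj := &corev1.ConfigMap{}
	if err := r.client.Get(ctx, req.NamespacedName, obj); err != nil {
		if kerrors.IsNotFound(err) {
			// Object deleted between the event and this reconcile: nothing to do.
			return reconcile.Result{}, nil
		}
		// Transient read error: returning it makes the controller requeue with backoff.
		return reconcile.Result{}, err
	}

	if obj.Data["ready"] != "true" {
		// Not ready yet: ask for another reconcile later instead of returning an error.
		return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
	}

	return reconcile.Result{}, nil
}
```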
- return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to get CephBlockPool") - } - - // Set a finalizer so we can do cleanup before the object goes away - err = opcontroller.AddFinalizerIfNotPresent(r.client, cephBlockPool) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to add finalizer") - } - - // The CR was just created, initializing status fields - if cephBlockPool.Status == nil { - updateStatus(r.client, request.NamespacedName, cephv1.ConditionProgressing, nil) - } - - // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) - if !isReadyToReconcile { - // This handles the case where the Ceph Cluster is gone and we want to delete that CR - // We skip the deletePool() function since everything is gone already - // - // Also, only remove the finalizer if the CephCluster is gone - // If not, we should wait for it to be ready - // This handles the case where the operator is not ready to accept Ceph command but the cluster exists - if !cephBlockPool.GetDeletionTimestamp().IsZero() && !cephClusterExists { - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephBlockPool) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. - return reconcile.Result{}, nil - } - return reconcileResponse, nil - } - - // Populate clusterInfo during each reconcile - clusterInfo, _, _, err := mon.LoadClusterInfo(r.context, request.NamespacedName.Namespace) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to populate cluster info") - } - r.clusterInfo = clusterInfo - - // Initialize the channel for this pool - // This allows us to track multiple CephBlockPool in the same namespace - blockPoolChannelKey := fmt.Sprintf("%s-%s", cephBlockPool.Namespace, cephBlockPool.Name) - _, poolChannelExists := r.blockPoolChannels[blockPoolChannelKey] - if !poolChannelExists { - r.blockPoolChannels[blockPoolChannelKey] = &blockPoolHealth{ - stopChan: make(chan struct{}), - monitoringRunning: false, - } - } - - // DELETE: the CR was deleted - if !cephBlockPool.GetDeletionTimestamp().IsZero() { - // If the ceph block pool is still in the map, we must remove it during CR deletion - // We must remove it first otherwise the checker will panic since the status/info will be nil - if poolChannelExists { - r.cancelMirrorMonitoring(blockPoolChannelKey) - } - - logger.Infof("deleting pool %q", cephBlockPool.Name) - err := deletePool(r.context, clusterInfo, cephBlockPool) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to delete pool %q. ", cephBlockPool.Name) - } - - // disable RBD stats collection if cephBlockPool was deleted - if err := configureRBDStats(r.context, clusterInfo); err != nil { - logger.Errorf("failed to disable stats collection for pool(s). %v", err) - } - - // Remove finalizer - err = opcontroller.RemoveFinalizer(r.client, cephBlockPool) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to remove finalizer") - } - - // Return and do not requeue. Successful deletion. 
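Each pool gets its own mirroring checker goroutine, tracked in blockPoolChannels under a "namespace-name" key and cancelled by closing its stop channel, so multiple CephBlockPools in one namespace can be monitored independently. Below is a generic sketch of that start/stop bookkeeping using plain channels and a mutex; the types and names are illustrative, not Rook's.

```go
package example

import (
	"fmt"
	"sync"
	"time"
)

// monitorRegistry tracks one background checker per pool, keyed by
// "<namespace>-<name>", so pools can be monitored and cancelled independently.
type monitorRegistry struct {
	mu    sync.Mutex
	stops map[string]chan struct{}
}

func newMonitorRegistry() *monitorRegistry {
	return &monitorRegistry{stops: make(map[string]chan struct{})}
}

// Start launches a checker goroutine for the pool unless one is already running.
func (m *monitorRegistry) Start(namespace, name string, interval time.Duration, check func()) {
	key := fmt.Sprintf("%s-%s", namespace, name)
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, running := m.stops[key]; running {
		return // already monitoring this pool
	}
	stop := make(chan struct{})
	m.stops[key] = stop
	go func() {
		check() // check once immediately, then on every tick
		for {
			select {
			case <-stop:
				return
			case <-time.After(interval):
				check()
			}
		}
	}()
}

// Stop cancels the checker for the pool, if any, by closing its stop channel
// and forgetting the entry, mirroring the cancelMirrorMonitoring idea above.
func (m *monitorRegistry) Stop(namespace, name string) {
	key := fmt.Sprintf("%s-%s", namespace, name)
	m.mu.Lock()
	defer m.mu.Unlock()
	if stop, running := m.stops[key]; running {
		close(stop)
		delete(m.stops, key)
	}
}
```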
- return reconcile.Result{}, nil - } - - // validate the pool settings - if err := ValidatePool(r.context, clusterInfo, &cephCluster.Spec, cephBlockPool); err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil - } - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "invalid pool CR %q spec", cephBlockPool.Name) - } - - // Get CephCluster version - cephVersion, err := opcontroller.GetImageVersion(cephCluster) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to fetch ceph version from cephcluster %q", cephCluster.Name) - } - r.clusterInfo.CephVersion = *cephVersion - - // If the CephCluster has enabled the "pg_autoscaler" module and is running Nautilus - // we force the pg_autoscale_mode to "on" - _, propertyExists := cephBlockPool.Spec.Parameters[cephclient.PgAutoscaleModeProperty] - if mgr.IsModuleInSpec(cephCluster.Spec.Mgr.Modules, mgr.PgautoscalerModuleName) && - !cephVersion.IsAtLeastOctopus() && - !propertyExists { - if len(cephBlockPool.Spec.Parameters) == 0 { - cephBlockPool.Spec.Parameters = make(map[string]string) - } - cephBlockPool.Spec.Parameters[cephclient.PgAutoscaleModeProperty] = cephclient.PgAutoscaleModeOn - } - - // CREATE/UPDATE - reconcileResponse, err = r.reconcileCreatePool(clusterInfo, &cephCluster.Spec, cephBlockPool) - if err != nil { - if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { - logger.Info(opcontroller.OperatorNotInitializedMessage) - return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil - } - updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure, nil) - return reconcileResponse, errors.Wrapf(err, "failed to create pool %q.", cephBlockPool.GetName()) - } - - // enable/disable RBD stats collection based on cephBlockPool spec - if err := configureRBDStats(r.context, clusterInfo); err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to enable/disable stats collection for pool(s)") - } - - checker := newMirrorChecker(r.context, r.client, r.clusterInfo, request.NamespacedName, &cephBlockPool.Spec, cephBlockPool.Name) - // ADD PEERS - logger.Debug("reconciling create rbd mirror peer configuration") - if cephBlockPool.Spec.Mirroring.Enabled { - // Always create a bootstrap peer token in case another cluster wants to add us as a peer - reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, clusterInfo, cephBlockPool, k8sutil.NewOwnerInfo(cephBlockPool, r.scheme)) - if err != nil { - updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure, nil) - return reconcileResponse, errors.Wrapf(err, "failed to create rbd-mirror bootstrap peer for pool %q.", cephBlockPool.GetName()) - } - - // Check if rbd-mirror CR and daemons are running - logger.Debug("listing rbd-mirror CR") - // Run the goroutine to update the mirroring status - if !cephBlockPool.Spec.StatusCheck.Mirror.Disabled { - // Start monitoring of the pool - if r.blockPoolChannels[blockPoolChannelKey].monitoringRunning { - logger.Debug("pool monitoring go routine already running!") - } else { - r.blockPoolChannels[blockPoolChannelKey].monitoringRunning = true - go checker.checkMirroring(r.blockPoolChannels[blockPoolChannelKey].stopChan) - } - } - - // Add bootstrap peer if any - logger.Debug("reconciling ceph bootstrap peers import") - reconcileResponse, err = r.reconcileAddBoostrapPeer(cephBlockPool, 
request.NamespacedName) - if err != nil { - return reconcileResponse, errors.Wrap(err, "failed to add ceph rbd mirror peer") - } - - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady, opcontroller.GenerateStatusInfo(cephBlockPool)) - - // If not mirrored there is no Status Info field to fulfil - } else { - // Set Ready status, we are done reconciling - updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady, nil) - - // Stop monitoring the mirroring status of this pool - if poolChannelExists && r.blockPoolChannels[blockPoolChannelKey].monitoringRunning { - r.cancelMirrorMonitoring(blockPoolChannelKey) - // Reset the MirrorHealthCheckSpec - checker.updateStatusMirroring(nil, nil, nil, "") - } - } - - // Return and do not requeue - logger.Debug("done reconciling") - return reconcile.Result{}, nil -} - -func (r *ReconcileCephBlockPool) reconcileCreatePool(clusterInfo *cephclient.ClusterInfo, cephCluster *cephv1.ClusterSpec, cephBlockPool *cephv1.CephBlockPool) (reconcile.Result, error) { - err := createPool(r.context, clusterInfo, cephCluster, cephBlockPool) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to create pool %q.", cephBlockPool.GetName()) - } - - // Let's return here so that on the initial creation we don't check for update right away - return reconcile.Result{}, nil -} - -// Create the pool -func createPool(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec, p *cephv1.CephBlockPool) error { - // create the pool - logger.Infof("creating pool %q in namespace %q", p.Name, p.Namespace) - if err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, p.Name, p.Spec, poolApplicationNameRBD); err != nil { - return errors.Wrapf(err, "failed to create pool %q", p.Name) - } - - return nil -} - -// Delete the pool -func deletePool(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, p *cephv1.CephBlockPool) error { - pools, err := cephclient.ListPoolSummaries(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to list pools") - } - - // Only delete the pool if it exists... 
- for _, pool := range pools { - if pool.Name == p.Name { - err := cephclient.DeletePool(context, clusterInfo, p.Name) - if err != nil { - return errors.Wrapf(err, "failed to delete pool %q", p.Name) - } - } - } - - return nil -} - -func configureRBDStats(clusterContext *clusterd.Context, clusterInfo *cephclient.ClusterInfo) error { - logger.Debug("configuring RBD per-image IO statistics collection") - namespaceListOpt := client.InNamespace(clusterInfo.Namespace) - cephBlockPoolList := &cephv1.CephBlockPoolList{} - var enableStatsForPools []string - err := clusterContext.Client.List(context.TODO(), cephBlockPoolList, namespaceListOpt) - if err != nil { - return errors.Wrap(err, "failed to retrieve list of CephBlockPool") - } - for _, cephBlockPool := range cephBlockPoolList.Items { - if cephBlockPool.GetDeletionTimestamp() == nil && cephBlockPool.Spec.EnableRBDStats { - // list of CephBlockPool with enableRBDStats set to true and not marked for deletion - enableStatsForPools = append(enableStatsForPools, cephBlockPool.Name) - } - } - logger.Debugf("RBD per-image IO statistics will be collected for pools: %v", enableStatsForPools) - monStore := config.GetMonStore(clusterContext, clusterInfo) - if len(enableStatsForPools) == 0 { - err = monStore.Delete("mgr.", "mgr/prometheus/rbd_stats_pools") - } else { - err = monStore.Set("mgr.", "mgr/prometheus/rbd_stats_pools", strings.Join(enableStatsForPools, ",")) - } - if err != nil { - return errors.Wrapf(err, "failed to enable rbd_stats_pools") - } - logger.Debug("configured RBD per-image IO statistics collection") - return nil -} - -func (r *ReconcileCephBlockPool) cancelMirrorMonitoring(cephBlockPoolName string) { - // Close the channel to stop the mirroring status - close(r.blockPoolChannels[cephBlockPoolName].stopChan) - - // Remove ceph block pool from the map - delete(r.blockPoolChannels, cephBlockPoolName) -} diff --git a/pkg/operator/ceph/pool/controller_test.go b/pkg/operator/ceph/pool/controller_test.go deleted file mode 100644 index 2b8318c5c..000000000 --- a/pkg/operator/ceph/pool/controller_test.go +++ /dev/null @@ -1,520 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
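The removed configureRBDStats recomputes the full mgr/prometheus/rbd_stats_pools value on every reconcile: pools with EnableRBDStats set and not marked for deletion are comma-joined, and the key is deleted from the mon store when that list is empty. A small, hedged sketch of just that decision as a pure function (no Ceph calls, simplified field names):

```go
package example

import "strings"

// poolStats is a minimal stand-in for the fields configureRBDStats reads from
// a CephBlockPool: its name, whether RBD stats are enabled, and whether the
// object is already marked for deletion.
type poolStats struct {
	Name           string
	EnableRBDStats bool
	Deleting       bool
}

// rbdStatsPoolsValue returns the value to set for mgr/prometheus/rbd_stats_pools
// and whether the key should instead be deleted because no pool wants stats.
func rbdStatsPoolsValue(pools []poolStats) (value string, deleteKey bool) {
	var enabled []string
	for _, p := range pools {
		if p.EnableRBDStats && !p.Deleting {
			enabled = append(enabled, p.Name)
		}
	}
	if len(enabled) == 0 {
		return "", true
	}
	return strings.Join(enabled, ","), false
}
```

For example, rbdStatsPoolsValue([]poolStats{{Name: "rbd", EnableRBDStats: true}}) yields ("rbd", false), while an empty or all-disabled list yields ("", true), i.e. remove the key.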
-*/ - -package pool - -import ( - "context" - "os" - "testing" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - "github.com/tevino/abool" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func TestCreatePool(t *testing.T) { - clusterInfo := &cephclient.ClusterInfo{Namespace: "myns"} - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if command == "ceph" && args[1] == "erasure-code-profile" { - return `{"k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}`, nil - } - return "", nil - }, - } - context := &clusterd.Context{Executor: executor} - - p := &cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - - clusterSpec := &cephv1.ClusterSpec{Storage: cephv1.StorageScopeSpec{Config: map[string]string{cephclient.CrushRootConfigKey: "cluster-crush-root"}}} - err := createPool(context, clusterInfo, clusterSpec, p) - assert.Nil(t, err) - - // succeed with EC - p.Spec.Replicated.Size = 0 - p.Spec.ErasureCoded.CodingChunks = 1 - p.Spec.ErasureCoded.DataChunks = 2 - err = createPool(context, clusterInfo, clusterSpec, p) - assert.Nil(t, err) -} - -func TestDeletePool(t *testing.T) { - failOnDelete := false - clusterInfo := &cephclient.ClusterInfo{Namespace: "myns"} - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - emptyPool := "{\"images\":{\"count\":0,\"provisioned_bytes\":0,\"snap_count\":0},\"trash\":{\"count\":1,\"provisioned_bytes\":2048,\"snap_count\":0}}" - p := "{\"images\":{\"count\":1,\"provisioned_bytes\":1024,\"snap_count\":0},\"trash\":{\"count\":1,\"provisioned_bytes\":2048,\"snap_count\":0}}" - logger.Infof("Command: %s %v", command, args) - if command == "ceph" && args[1] == "lspools" { - return `[{"poolnum":1,"poolname":"mypool"}]`, nil - } else if command == "ceph" && args[1] == "pool" && args[2] == "get" { - return `{"pool": "mypool","pool_id": 1,"size":1}`, nil - } else if command == "ceph" && args[1] == "pool" && args[2] == "delete" { - return "", nil - } else if args[0] == "pool" { - if args[1] == "stats" { - if !failOnDelete { - return emptyPool, nil - } - return p, nil - } - return "", errors.Errorf("rbd: error opening pool %q: (2) No such file or directory", args[3]) - } - return "", errors.Errorf("unexpected rbd command %q", args) - }, - } - context := &clusterd.Context{Executor: executor} - - // delete a pool that exists - p := &cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - err := deletePool(context, clusterInfo, p) - assert.Nil(t, err) - - // succeed even if 
the pool doesn't exist - p = &cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "otherpool", Namespace: clusterInfo.Namespace}} - err = deletePool(context, clusterInfo, p) - assert.Nil(t, err) - - // fail if images/snapshosts exist in the pool - failOnDelete = true - p = &cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - err = deletePool(context, clusterInfo, p) - assert.NotNil(t, err) -} - -// TestCephBlockPoolController runs ReconcileCephBlockPool.Reconcile() against a -// fake client that tracks a CephBlockPool object. -func TestCephBlockPoolController(t *testing.T) { - ctx := context.TODO() - // Set DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - logger.Info("RUN 1") - var ( - name = "my-pool" - namespace = "rook-ceph" - replicas uint = 3 - ) - - // A Pool resource with metadata and spec. - pool := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - UID: types.UID("c47cac40-9bee-4d52-823b-ccd803ba5bfe"), - }, - Spec: cephv1.PoolSpec{ - Replicated: cephv1.ReplicatedSpec{ - Size: replicas, - }, - Mirroring: cephv1.MirroringSpec{ - Peers: &cephv1.MirroringPeerSpec{}, - }, - StatusCheck: cephv1.MirrorHealthCheckSpec{ - Mirror: cephv1.HealthCheckSpec{ - Disabled: true, - }, - }, - }, - Status: &cephv1.CephBlockPoolStatus{ - Phase: "", - }, - } - - // Objects to track in the fake client. - object := []runtime.Object{ - pool, - } - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - - return "", nil - }, - } - c := &clusterd.Context{ - Executor: executor, - Clientset: testop.New(t, 1), - RookClientset: rookclient.NewSimpleClientset(), - RequestCancelOrchestration: abool.New(), - } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, pool, &cephv1.CephClusterList{}) - - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithRuntimeObjects(object...).Build() - - // Create a ReconcileCephBlockPool object with the scheme and fake client. - r := &ReconcileCephBlockPool{ - client: cl, - scheme: s, - context: c, - blockPoolChannels: make(map[string]*blockPoolHealth), - } - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . 
- req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - - // Create pool for updateCephBlockPoolStatus() - _, err := c.RookClientset.CephV1().CephBlockPools(namespace).Create(ctx, pool, metav1.CreateOptions{}) - assert.NoError(t, err) - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - logger.Info("RUN 2") - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephVersion: &cephv1.ClusterVersion{ - Version: "14.2.9-0", - }, - CephStatus: &cephv1.CephStatus{ - Health: "", - }, - }, - } - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}, &cephv1.CephClusterList{}) - - // Create CephCluster for updateCephBlockPoolStatus() - _, err = c.RookClientset.CephV1().CephClusters(namespace).Create(ctx, cephCluster, metav1.CreateOptions{}) - assert.NoError(t, err) - - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithRuntimeObjects(object...).Build() - // Create a ReconcileCephBlockPool object with the scheme and fake client. - r = &ReconcileCephBlockPool{ - client: cl, - scheme: s, - context: c, - blockPoolChannels: make(map[string]*blockPoolHealth), - } - assert.True(t, res.Requeue) - - // - // TEST 3: - // - // SUCCESS! The CephCluster is ready - // - logger.Info("RUN 3") - cephCluster.Status.Phase = cephv1.ConditionReady - cephCluster.Status.CephStatus.Health = "HEALTH_OK" - - objects := []runtime.Object{ - pool, - cephCluster, - } - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithRuntimeObjects(objects...).Build() - c.Client = cl - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "config" && args[2] == "mgr." && args[3] == "mgr/prometheus/rbd_stats_pools" { - return "", nil - } - - return "", nil - }, - } - c.Executor = executor - - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephBlockPoolList{}) - // Create a ReconcileCephBlockPool object with the scheme and fake client. 
- r = &ReconcileCephBlockPool{ - client: cl, - scheme: s, - context: c, - blockPoolChannels: make(map[string]*blockPoolHealth), - } - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - - err = r.client.Get(context.TODO(), req.NamespacedName, pool) - assert.NoError(t, err) - assert.Equal(t, cephv1.ConditionReady, pool.Status.Phase) - - // - // TEST 4: Mirroring - // No mirror mode set: failure - logger.Info("RUN 4") - pool.Spec.Mirroring.Enabled = true - err = r.client.Update(context.TODO(), pool) - assert.NoError(t, err) - res, err = r.Reconcile(ctx, req) - assert.Error(t, err) - assert.True(t, res.Requeue) - - // - // TEST 5: Mirroring - // mirror mode set: Success - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "mirror" && args[1] == "pool" && args[2] == "peer" && args[3] == "bootstrap" && args[4] == "create" { - return `eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==`, nil - } - return "", nil - }, - } - c.Executor = executor - r = &ReconcileCephBlockPool{ - client: cl, - scheme: s, - context: c, - blockPoolChannels: make(map[string]*blockPoolHealth), - } - - pool.Spec.Mirroring.Mode = "image" - pool.Spec.Mirroring.Peers.SecretNames = []string{} - err = r.client.Update(context.TODO(), pool) - assert.NoError(t, err) - for i := 0; i < 5; i++ { - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, pool) - assert.NoError(t, err) - assert.Equal(t, cephv1.ConditionReady, pool.Status.Phase) - if _, ok := pool.Status.Info[opcontroller.RBDMirrorBootstrapPeerSecretName]; ok { - break - } - logger.Infof("FIX: trying again to update the mirroring status") - } - assert.NotEmpty(t, pool.Status.Info[opcontroller.RBDMirrorBootstrapPeerSecretName], pool.Status.Info) - - // fetch the secret - myPeerSecret, err := c.Clientset.CoreV1().Secrets(namespace).Get(ctx, pool.Status.Info[opcontroller.RBDMirrorBootstrapPeerSecretName], metav1.GetOptions{}) - assert.NoError(t, err) - if myPeerSecret != nil { - assert.NotEmpty(t, myPeerSecret.Data["token"], myPeerSecret.Data) - assert.NotEmpty(t, myPeerSecret.Data["pool"]) - } - - // - // TEST 6: Import peer token - - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - - // Create a ReconcileCephBlockPool object with the scheme and fake client. 
- r = &ReconcileCephBlockPool{ - client: cl, - scheme: s, - context: c, - blockPoolChannels: make(map[string]*blockPoolHealth), - } - - peerSecretName := "peer-secret" - pool.Spec.Mirroring.Peers.SecretNames = []string{peerSecretName} - err = r.client.Update(context.TODO(), pool) - assert.NoError(t, err) - res, err = r.Reconcile(ctx, req) - // assert reconcile failure because peer token secert was not created - assert.Error(t, err) - assert.True(t, res.Requeue) - - bootstrapPeerToken := `eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==` //nolint:gosec // This is just a var name, not a real token - peerSecret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: peerSecretName, - Namespace: namespace, - }, - Data: map[string][]byte{"token": []byte(bootstrapPeerToken), "pool": []byte("goo")}, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, peerSecret, metav1.CreateOptions{}) - assert.NoError(t, err) - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, pool) - assert.NoError(t, err) - - // - // TEST 7: Mirroring disabled - r = &ReconcileCephBlockPool{ - client: cl, - scheme: s, - context: c, - blockPoolChannels: make(map[string]*blockPoolHealth), - } - pool.Spec.Mirroring.Enabled = false - pool.Spec.Mirroring.Mode = "image" - err = r.client.Update(context.TODO(), pool) - assert.NoError(t, err) - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, pool) - assert.NoError(t, err) - assert.Equal(t, cephv1.ConditionReady, pool.Status.Phase) - - assert.Nil(t, pool.Status.MirroringStatus) -} - -func TestConfigureRBDStats(t *testing.T) { - var ( - s = runtime.NewScheme() - context = &clusterd.Context{} - namespace = "rook-ceph" - ) - - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[0] == "config" && args[2] == "mgr." && args[3] == "mgr/prometheus/rbd_stats_pools" { - if args[1] == "set" && args[4] != "" { - return "", nil - } - if args[1] == "get" { - return "", nil - } - if args[1] == "rm" { - return "", nil - } - } - return "", errors.Errorf("unexpected arguments %q", args) - }, - } - - context.Executor = executor - context.Client = fake.NewClientBuilder().WithScheme(s).Build() - - clusterInfo := &cephclient.ClusterInfo{Namespace: namespace} - - // Case 1: CephBlockPoolList is not registered in scheme. - // So, an error is expected as List() operation would fail. - err := configureRBDStats(context, clusterInfo) - assert.NotNil(t, err) - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephBlockPoolList{}) - // Case 2: CephBlockPoolList is registered in schema. - // So, no error is expected. - err = configureRBDStats(context, clusterInfo) - assert.Nil(t, err) - - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephBlockPool{}) - // A Pool resource with metadata and spec. 
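The deleted tests build a controller-runtime fake client pre-loaded with objects and then drive Reconcile directly with a hand-built reconcile.Request. A stripped-down version of that harness is sketched below, reusing the generic exampleReconciler from the earlier sketch so it stays compilable; the object names are illustrative.

```go
package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// TestExampleReconcile drives a reconciler against a fake client that already
// tracks the object, the same pattern the deleted CephBlockPool tests use.
func TestExampleReconcile(t *testing.T) {
	obj := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "my-pool", Namespace: "rook-ceph"},
		Data:       map[string]string{"ready": "true"},
	}

	// scheme.Scheme already knows core types; CRD tests register their own types first.
	cl := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(obj).Build()

	r := &exampleReconciler{client: cl}
	req := reconcile.Request{NamespacedName: types.NamespacedName{Name: "my-pool", Namespace: "rook-ceph"}}

	res, err := r.Reconcile(context.TODO(), req)
	assert.NoError(t, err)
	assert.False(t, res.Requeue)
}
```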
- poolWithRBDStatsDisabled := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-pool-without-rbd-stats", - Namespace: namespace, - }, - Spec: cephv1.PoolSpec{ - Replicated: cephv1.ReplicatedSpec{ - Size: 3, - }, - }, - } - - // Case 3: One CephBlockPool with EnableRBDStats:false (default). - objects := []runtime.Object{ - poolWithRBDStatsDisabled, - } - context.Client = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() - err = configureRBDStats(context, clusterInfo) - assert.Nil(t, err) - - // Case 4: Two CephBlockPools with EnableRBDStats:false & EnableRBDStats:true. - poolWithRBDStatsEnabled := poolWithRBDStatsDisabled.DeepCopy() - poolWithRBDStatsEnabled.Name = "my-pool-with-rbd-stats" - poolWithRBDStatsEnabled.Spec.EnableRBDStats = true - objects = append(objects, poolWithRBDStatsEnabled) - context.Client = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() - err = configureRBDStats(context, clusterInfo) - assert.Nil(t, err) - - // Case 5: Two CephBlockPools with EnableRBDStats:false & EnableRBDStats:true. - // SetConfig returns an error - context.Executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - return "", errors.New("mock error to simulate failure of mon store Set() function") - }, - } - err = configureRBDStats(context, clusterInfo) - assert.NotNil(t, err) -} diff --git a/pkg/operator/ceph/pool/health.go b/pkg/operator/ceph/pool/health.go deleted file mode 100644 index 1dd90d27b..000000000 --- a/pkg/operator/ceph/pool/health.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package pool - -import ( - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -var ( - defaultHealthCheckInterval = 1 * time.Minute -) - -type mirrorChecker struct { - context *clusterd.Context - interval *time.Duration - client client.Client - clusterInfo *cephclient.ClusterInfo - namespacedName types.NamespacedName - poolSpec *cephv1.PoolSpec - poolName string -} - -// newMirrorChecker creates a new HealthChecker object -func newMirrorChecker(context *clusterd.Context, client client.Client, clusterInfo *cephclient.ClusterInfo, namespacedName types.NamespacedName, poolSpec *cephv1.PoolSpec, poolName string) *mirrorChecker { - c := &mirrorChecker{ - context: context, - interval: &defaultHealthCheckInterval, - clusterInfo: clusterInfo, - namespacedName: namespacedName, - client: client, - poolSpec: poolSpec, - poolName: poolName, - } - - // allow overriding the check interval - checkInterval := poolSpec.StatusCheck.Mirror.Interval - if checkInterval != nil { - logger.Infof("pool mirroring status check interval for block pool %q is %q", namespacedName.Name, checkInterval.Duration.String()) - c.interval = &checkInterval.Duration - } - - return c -} - -// checkMirroring periodically checks the health of the cluster -func (c *mirrorChecker) checkMirroring(stopCh chan struct{}) { - // check the mirroring health immediately before starting the loop - err := c.checkMirroringHealth() - if err != nil { - c.updateStatusMirroring(nil, nil, nil, err.Error()) - logger.Debugf("failed to check pool mirroring status for ceph block pool %q. %v", c.namespacedName.Name, err) - } - - for { - select { - case <-stopCh: - logger.Infof("stopping monitoring pool mirroring status %q", c.namespacedName.Name) - return - - case <-time.After(*c.interval): - logger.Debugf("checking pool mirroring status %q", c.namespacedName.Name) - err := c.checkMirroringHealth() - if err != nil { - c.updateStatusMirroring(nil, nil, nil, err.Error()) - logger.Debugf("failed to check pool mirroring status for ceph block pool %q. %v", c.namespacedName.Name, err) - } - } - } -} - -func (c *mirrorChecker) checkMirroringHealth() error { - // Check mirroring status - mirrorStatus, err := cephclient.GetPoolMirroringStatus(c.context, c.clusterInfo, c.poolName) - if err != nil { - c.updateStatusMirroring(nil, nil, nil, err.Error()) - } - - // Check mirroring info - mirrorInfo, err := cephclient.GetPoolMirroringInfo(c.context, c.clusterInfo, c.poolName) - if err != nil { - c.updateStatusMirroring(nil, nil, nil, err.Error()) - } - - // If snapshot scheduling is enabled let's add it to the status - // snapSchedStatus := cephclient.SnapshotScheduleStatus{} - snapSchedStatus := []cephv1.SnapshotSchedulesSpec{} - if c.poolSpec.Mirroring.SnapshotSchedulesEnabled() { - snapSchedStatus, err = cephclient.ListSnapshotSchedulesRecursively(c.context, c.clusterInfo, c.poolName) - if err != nil { - c.updateStatusMirroring(nil, nil, nil, err.Error()) - } - } - - // On success - c.updateStatusMirroring(mirrorStatus.Summary, mirrorInfo, snapSchedStatus, "") - - return nil -} diff --git a/pkg/operator/ceph/pool/peers.go b/pkg/operator/ceph/pool/peers.go deleted file mode 100644 index 496a4bcca..000000000 --- a/pkg/operator/ceph/pool/peers.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pool - -import ( - "context" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/daemon/ceph/client" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ReconcileCephBlockPool) reconcileAddBoostrapPeer(pool *cephv1.CephBlockPool, - namespacedName types.NamespacedName) (reconcile.Result, error) { - - if pool.Spec.Mirroring.Peers == nil { - return reconcile.Result{}, nil - } - - // List all the peers secret, we can have more than one peer we might want to configure - // For each, get the Kubernetes Secret and import the "peer token" so that we can configure the mirroring - for _, peerSecret := range pool.Spec.Mirroring.Peers.SecretNames { - logger.Debugf("fetching bootstrap peer kubernetes secret %q", peerSecret) - s, err := r.context.Clientset.CoreV1().Secrets(r.clusterInfo.Namespace).Get(context.TODO(), peerSecret, metav1.GetOptions{}) - // We don't care about IsNotFound here, we still need to fail - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to fetch kubernetes secret %q bootstrap peer", peerSecret) - } - - // Validate peer secret content - err = opcontroller.ValidatePeerToken(pool, s.Data) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to validate rbd-mirror bootstrap peer secret %q data", peerSecret) - } - - // Import bootstrap peer - err = client.ImportRBDMirrorBootstrapPeer(r.context, r.clusterInfo, pool.Name, string(s.Data["direction"]), s.Data["token"]) - if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to import bootstrap peer token") - } - } - - return reconcile.Result{}, nil -} diff --git a/pkg/operator/ceph/pool/status.go b/pkg/operator/ceph/pool/status.go deleted file mode 100644 index 9c8b5b4d3..000000000 --- a/pkg/operator/ceph/pool/status.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package pool to manage a rook pool. 
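The removed reconcileAddBoostrapPeer reads each configured peer Secret and imports its token, taking the optional replication direction from the same Secret. A hedged sketch of that Secret access, using only the "token" and "direction" data keys that appear in the deleted code (the helper itself is illustrative, not Rook's):

```go
package example

import (
	"context"

	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// readBootstrapPeerSecret fetches a peer secret and returns its bootstrap token
// and optional replication direction. The "token"/"direction" keys follow the
// layout used by the deleted peers.go; treat this as an illustrative sketch.
func readBootstrapPeerSecret(ctx context.Context, clientset kubernetes.Interface, namespace, name string) (token []byte, direction string, err error) {
	s, err := clientset.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, "", errors.Wrapf(err, "failed to fetch bootstrap peer secret %q", name)
	}
	token, ok := s.Data["token"]
	if !ok || len(token) == 0 {
		return nil, "", errors.Errorf("secret %q has no %q key", name, "token")
	}
	// direction may be empty; the peer import treats it as optional.
	return token, string(s.Data["direction"]), nil
}
```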
-package pool - -import ( - "context" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/ceph/reporting" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// updateStatus updates a pool CR with the given status -func updateStatus(client client.Client, poolName types.NamespacedName, status cephv1.ConditionType, info map[string]string) { - pool := &cephv1.CephBlockPool{} - err := client.Get(context.TODO(), poolName, pool) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephBlockPool resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve pool %q to update status to %q. %v", poolName, status, err) - return - } - - if pool.Status == nil { - pool.Status = &cephv1.CephBlockPoolStatus{} - } - - pool.Status.Phase = status - pool.Status.Info = info - if err := reporting.UpdateStatus(client, pool); err != nil { - logger.Warningf("failed to set pool %q status to %q. %v", pool.Name, status, err) - return - } - logger.Debugf("pool %q status updated to %q", poolName, status) -} - -// updateStatusBucket updates an object with a given status -func (c *mirrorChecker) updateStatusMirroring(mirrorStatus *cephv1.PoolMirroringStatusSummarySpec, mirrorInfo *cephv1.PoolMirroringInfo, snapSchedStatus []cephv1.SnapshotSchedulesSpec, details string) { - blockPool := &cephv1.CephBlockPool{} - if err := c.client.Get(context.TODO(), c.namespacedName, blockPool); err != nil { - if kerrors.IsNotFound(err) { - logger.Debug("CephBlockPool resource not found. Ignoring since object must be deleted.") - return - } - logger.Warningf("failed to retrieve ceph block pool %q to update mirroring status. %v", c.namespacedName.Name, err) - return - } - if blockPool.Status == nil { - blockPool.Status = &cephv1.CephBlockPoolStatus{} - } - - // Update the CephBlockPool CR status field - blockPool.Status.MirroringStatus, blockPool.Status.MirroringInfo, blockPool.Status.SnapshotScheduleStatus = toCustomResourceStatus(blockPool.Status.MirroringStatus, mirrorStatus, blockPool.Status.MirroringInfo, mirrorInfo, blockPool.Status.SnapshotScheduleStatus, snapSchedStatus, details) - if err := reporting.UpdateStatus(c.client, blockPool); err != nil { - logger.Errorf("failed to set ceph block pool %q mirroring status. 
%v", c.namespacedName.Name, err) - return - } - - logger.Debugf("ceph block pool %q mirroring status updated", c.namespacedName.Name) -} - -func toCustomResourceStatus(currentStatus *cephv1.MirroringStatusSpec, mirroringStatus *cephv1.PoolMirroringStatusSummarySpec, - currentInfo *cephv1.MirroringInfoSpec, mirroringInfo *cephv1.PoolMirroringInfo, - currentSnapSchedStatus *cephv1.SnapshotScheduleStatusSpec, snapSchedStatus []cephv1.SnapshotSchedulesSpec, - details string) (*cephv1.MirroringStatusSpec, *cephv1.MirroringInfoSpec, *cephv1.SnapshotScheduleStatusSpec) { - mirroringStatusSpec := &cephv1.MirroringStatusSpec{} - mirroringInfoSpec := &cephv1.MirroringInfoSpec{} - snapshotScheduleStatusSpec := &cephv1.SnapshotScheduleStatusSpec{} - - // mirroringStatus will be nil in case of an error to fetch it - if mirroringStatus != nil { - mirroringStatusSpec.LastChecked = time.Now().UTC().Format(time.RFC3339) - mirroringStatusSpec.Summary = mirroringStatus - } - - // Always display the details, typically an error - mirroringStatusSpec.Details = details - - if currentStatus != nil { - mirroringStatusSpec.LastChanged = currentStatus.LastChanged - } - - // mirroringInfo will be nil in case of an error to fetch it - if mirroringInfo != nil { - mirroringInfoSpec.LastChecked = time.Now().UTC().Format(time.RFC3339) - mirroringInfoSpec.PoolMirroringInfo = mirroringInfo - } - // Always display the details, typically an error - mirroringInfoSpec.Details = details - - if currentInfo != nil { - mirroringInfoSpec.LastChanged = currentInfo.LastChecked - } - - // snapSchedStatus will be nil in case of an error to fetch it - if len(snapSchedStatus) != 0 { - snapshotScheduleStatusSpec.LastChecked = time.Now().UTC().Format(time.RFC3339) - snapshotScheduleStatusSpec.SnapshotSchedules = snapSchedStatus - } - // Always display the details, typically an error - snapshotScheduleStatusSpec.Details = details - - if currentSnapSchedStatus != nil { - snapshotScheduleStatusSpec.LastChanged = currentSnapSchedStatus.LastChecked - } - - return mirroringStatusSpec, mirroringInfoSpec, snapshotScheduleStatusSpec -} diff --git a/pkg/operator/ceph/pool/status_test.go b/pkg/operator/ceph/pool/status_test.go deleted file mode 100644 index f9723a1a5..000000000 --- a/pkg/operator/ceph/pool/status_test.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package pool to manage a rook pool. 
-package pool - -import ( - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/stretchr/testify/assert" -) - -func TestToCustomResourceStatus(t *testing.T) { - mirroringStatus := &cephv1.PoolMirroringStatusSummarySpec{} - mirroringStatus.Health = "HEALTH_OK" - mirroringInfo := &cephv1.PoolMirroringInfo{ - Mode: "pool", - SiteName: "rook-ceph-emea", - Peers: []cephv1.PeersSpec{ - {UUID: "82656994-3314-4996-ac4c-263c2c9fd081"}, - }, - } - - // Test 1: Empty so it's disabled - { - newMirroringStatus, newMirroringInfo, _ := toCustomResourceStatus(&cephv1.MirroringStatusSpec{}, mirroringStatus, &cephv1.MirroringInfoSpec{}, mirroringInfo, &cephv1.SnapshotScheduleStatusSpec{}, []cephv1.SnapshotSchedulesSpec{}, "") - assert.NotEmpty(t, newMirroringStatus.Summary) - assert.Equal(t, "HEALTH_OK", newMirroringStatus.Summary.Health) - assert.Equal(t, "pool", newMirroringInfo.Mode) - } - - // Test 2: snap sched - { - snapSchedStatus := []cephv1.SnapshotSchedulesSpec{ - { - Pool: "my-pool", - Image: "pool/image", - }, - } - newMirroringStatus, newMirroringInfo, newSnapshotScheduleStatus := toCustomResourceStatus(&cephv1.MirroringStatusSpec{}, mirroringStatus, &cephv1.MirroringInfoSpec{}, mirroringInfo, &cephv1.SnapshotScheduleStatusSpec{}, snapSchedStatus, "") - assert.NotEmpty(t, newMirroringStatus.Summary) - assert.Equal(t, "HEALTH_OK", newMirroringStatus.Summary.Health) - assert.NotEmpty(t, newMirroringInfo.Mode, "pool") - assert.NotEmpty(t, newSnapshotScheduleStatus) - } -} diff --git a/pkg/operator/ceph/pool/validate.go b/pkg/operator/ceph/pool/validate.go deleted file mode 100644 index f96864627..000000000 --- a/pkg/operator/ceph/pool/validate.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package pool to manage a rook pool. 
-package pool - -import ( - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" -) - -// ValidatePool Validate the pool arguments -func ValidatePool(context *clusterd.Context, clusterInfo *client.ClusterInfo, clusterSpec *cephv1.ClusterSpec, p *cephv1.CephBlockPool) error { - if p.Name == "" { - return errors.New("missing name") - } - if p.Namespace == "" { - return errors.New("missing namespace") - } - if err := ValidatePoolSpec(context, clusterInfo, clusterSpec, &p.Spec); err != nil { - return err - } - return nil -} - -// ValidatePoolSpec validates the Ceph block pool spec CR -func ValidatePoolSpec(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec, p *cephv1.PoolSpec) error { - - if p.IsHybridStoragePool() { - err := validateDeviceClasses(context, clusterInfo, p) - if err != nil { - return errors.Wrap(err, "failed to validate device classes for hybrid storage pool spec") - } - } - - if p.IsReplicated() && p.IsErasureCoded() { - return errors.New("both replication and erasure code settings cannot be specified") - } - - if p.FailureDomain != "" && p.Replicated.SubFailureDomain != "" { - if p.FailureDomain == p.Replicated.SubFailureDomain { - return errors.New("failure and subfailure domain cannot be identical") - } - } - - // validate pools for stretch clusters - if clusterSpec.IsStretchCluster() { - if p.IsReplicated() { - if p.Replicated.Size != 4 { - return errors.New("pools in a stretch cluster must have replication size 4") - } - } - if p.IsErasureCoded() { - return errors.New("erasure coded pools are not supported in stretch clusters") - } - } - - var crush client.CrushMap - var err error - if p.FailureDomain != "" || p.CrushRoot != "" { - crush, err = client.GetCrushMap(context, clusterInfo) - if err != nil { - return errors.Wrap(err, "failed to get crush map") - } - } - - // validate the failure domain if specified - if p.FailureDomain != "" { - found := false - for _, t := range crush.Types { - if t.Name == p.FailureDomain { - found = true - break - } - } - if !found { - return errors.Errorf("unrecognized failure domain %s", p.FailureDomain) - } - } - - // validate the crush root if specified - if p.CrushRoot != "" { - found := false - for _, t := range crush.Buckets { - if t.Name == p.CrushRoot { - found = true - break - } - } - if !found { - return errors.Errorf("unrecognized crush root %s", p.CrushRoot) - } - } - - // validate the crush subdomain if specified - if p.Replicated.SubFailureDomain != "" { - found := false - for _, t := range crush.Types { - if t.Name == p.Replicated.SubFailureDomain { - found = true - break - } - } - if !found { - return errors.Errorf("unrecognized crush sub domain %s", p.Replicated.SubFailureDomain) - } - } - - // validate pool replica size - if p.IsReplicated() { - if p.Replicated.Size == 1 && p.Replicated.RequireSafeReplicaSize { - return errors.Errorf("error pool size is %d and requireSafeReplicaSize is %t, must be false", p.Replicated.Size, p.Replicated.RequireSafeReplicaSize) - } - - if p.Replicated.Size <= p.Replicated.ReplicasPerFailureDomain { - return errors.Errorf("error pool size is %d and replicasPerFailureDomain is %d, size must be greater", p.Replicated.Size, p.Replicated.ReplicasPerFailureDomain) - } - - if p.Replicated.ReplicasPerFailureDomain != 0 && p.Replicated.Size%p.Replicated.ReplicasPerFailureDomain != 0 { - 
return errors.Errorf("error replicasPerFailureDomain is %d must be a factor of the replica count %d", p.Replicated.ReplicasPerFailureDomain, p.Replicated.Size) - } - } - - // validate pool compression mode if specified - if p.CompressionMode != "" { - switch p.CompressionMode { - case "none", "passive", "aggressive", "force": - break - default: - return errors.Errorf("unrecognized compression mode %q", p.CompressionMode) - } - } - - // Validate mirroring settings - if p.Mirroring.Enabled { - switch p.Mirroring.Mode { - case "image", "pool": - break - default: - return errors.Errorf("unrecognized mirroring mode %q. only 'image and 'pool' are supported", p.Mirroring.Mode) - } - - if p.Mirroring.SnapshotSchedulesEnabled() { - for _, snapSchedule := range p.Mirroring.SnapshotSchedules { - if snapSchedule.Interval == "" && snapSchedule.StartTime != "" { - return errors.New("schedule interval cannot be empty if start time is specified") - } - } - } - } - - if !p.Mirroring.Enabled && p.Mirroring.SnapshotSchedulesEnabled() { - logger.Warning("mirroring must be enabled to configure snapshot scheduling") - } - - return nil -} - -// validateDeviceClasses validates the primary and secondary device classes in the HybridStorageSpec -func validateDeviceClasses(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, - p *cephv1.PoolSpec) error { - - primaryDeviceClass := p.Replicated.HybridStorage.PrimaryDeviceClass - secondaryDeviceClass := p.Replicated.HybridStorage.SecondaryDeviceClass - - err := validateDeviceClassOSDs(context, clusterInfo, primaryDeviceClass) - if err != nil { - return errors.Wrapf(err, "failed to validate primary device class %q", primaryDeviceClass) - } - - err = validateDeviceClassOSDs(context, clusterInfo, secondaryDeviceClass) - if err != nil { - return errors.Wrapf(err, "failed to validate secondary device class %q", secondaryDeviceClass) - } - - return nil -} - -// validateDeviceClassOSDs validates that the device class should have atleast one OSD -func validateDeviceClassOSDs(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, - deviceClassName string) error { - deviceClassOSDs, err := cephclient.GetDeviceClassOSDs(context, clusterInfo, deviceClassName) - if err != nil { - return errors.Wrapf(err, "failed to get osds for the device class %q", deviceClassName) - } - if len(deviceClassOSDs) == 0 { - return errors.Errorf("no osds available for the device class %q", deviceClassName) - } - - return nil -} diff --git a/pkg/operator/ceph/pool/validate_test.go b/pkg/operator/ceph/pool/validate_test.go deleted file mode 100644 index 8bd1faf5b..000000000 --- a/pkg/operator/ceph/pool/validate_test.go +++ /dev/null @@ -1,302 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package pool to manage a rook pool. 
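The removed ValidatePoolSpec rejects a pool spec before any Ceph command runs: replication and erasure coding are mutually exclusive, the failure domain and sub failure domain must differ, a size-1 replicated pool cannot also require a safe replica size, and replicasPerFailureDomain must be smaller than and evenly divide the replica size. A condensed sketch of those arithmetic checks on simplified fields (not the full CR validation, which also inspects the CRUSH map, compression, and mirroring settings):

```go
package example

import "github.com/pkg/errors"

// replicatedSpec condenses the fields the deleted ValidatePoolSpec checks for
// a replicated pool; the real CR carries many more settings.
type replicatedSpec struct {
	Size                     uint
	RequireSafeReplicaSize   bool
	ReplicasPerFailureDomain uint
	SubFailureDomain         string
}

// validateReplicated applies the replication-related invariants from the
// deleted validator to plain values.
func validateReplicated(failureDomain string, erasureCoded bool, r replicatedSpec) error {
	if r.Size > 0 && erasureCoded {
		return errors.New("both replication and erasure code settings cannot be specified")
	}
	if failureDomain != "" && r.SubFailureDomain != "" && failureDomain == r.SubFailureDomain {
		return errors.New("failure and subfailure domain cannot be identical")
	}
	if r.Size == 1 && r.RequireSafeReplicaSize {
		return errors.Errorf("pool size is %d and requireSafeReplicaSize is true, must be false", r.Size)
	}
	if r.ReplicasPerFailureDomain != 0 {
		if r.Size <= r.ReplicasPerFailureDomain {
			return errors.Errorf("pool size %d must be greater than replicasPerFailureDomain %d", r.Size, r.ReplicasPerFailureDomain)
		}
		if r.Size%r.ReplicasPerFailureDomain != 0 {
			return errors.Errorf("replicasPerFailureDomain %d must be a factor of the replica count %d", r.ReplicasPerFailureDomain, r.Size)
		}
	}
	return nil
}
```

For instance, a size of 4 with replicasPerFailureDomain of 2 passes, while 4 with 3 fails the divisibility check, matching the cases exercised by the deleted tests below.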
-package pool - -import ( - "testing" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidatePool(t *testing.T) { - context := &clusterd.Context{Executor: &exectest.MockExecutor{}} - clusterInfo := &cephclient.ClusterInfo{Namespace: "myns"} - clusterSpec := &cephv1.ClusterSpec{} - - // not specifying some replication or EC settings is fine - p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - err := ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) - - // must specify name - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Namespace: clusterInfo.Namespace}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) - - // must specify namespace - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) - - // must not specify both replication and EC settings - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.ErasureCoded.CodingChunks = 2 - p.Spec.ErasureCoded.DataChunks = 3 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) - - // succeed with replication settings - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) - - // size is 1 and RequireSafeReplicaSize is true - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = true - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // succeed with ec settings - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.ErasureCoded.CodingChunks = 1 - p.Spec.ErasureCoded.DataChunks = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) - - // Tests with various compression modes - // succeed with compression mode "none" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode = "none" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) - - // succeed with compression mode "aggressive" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode = "aggressive" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) - - // fail with compression mode "unsupported" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode 
= "unsupported" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // fail since replica size is lower than ReplicasPerFailureDomain - p.Spec.Replicated.ReplicasPerFailureDomain = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // fail since replica size is equal than ReplicasPerFailureDomain - p.Spec.Replicated.Size = 2 - p.Spec.Replicated.ReplicasPerFailureDomain = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // fail since ReplicasPerFailureDomain is not a power of 2 - p.Spec.Replicated.Size = 4 - p.Spec.Replicated.ReplicasPerFailureDomain = 3 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // fail since ReplicasPerFailureDomain is not a power of 2 - p.Spec.Replicated.Size = 4 - p.Spec.Replicated.ReplicasPerFailureDomain = 5 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // Failure the sub domain does not exist - p.Spec.Replicated.SubFailureDomain = "dummy" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // succeed with ec pool and valid compression mode - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.ErasureCoded.CodingChunks = 1 - p.Spec.ErasureCoded.DataChunks = 2 - p.Spec.CompressionMode = "passive" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) - - // Add mirror test mode - { - p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Mirroring.Enabled = true - p.Spec.Mirroring.Mode = "foo" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - assert.EqualError(t, err, "unrecognized mirroring mode \"foo\". 
only 'image and 'pool' are supported") - - // Success mode is known - p.Spec.Mirroring.Mode = "pool" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NoError(t, err) - - // Error no interval specified - p.Spec.Mirroring.SnapshotSchedules = []cephv1.SnapshotScheduleSpec{{StartTime: "14:00:00-05:00"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - assert.EqualError(t, err, "schedule interval cannot be empty if start time is specified") - - // Success we have an interval - p.Spec.Mirroring.SnapshotSchedules = []cephv1.SnapshotScheduleSpec{{Interval: "24h"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NoError(t, err) - } - - // Failure and subfailure domains - { - p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.FailureDomain = "host" - p.Spec.Replicated.SubFailureDomain = "host" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - assert.EqualError(t, err, "failure and subfailure domain cannot be identical") - } - -} - -func TestValidateCrushProperties(t *testing.T) { - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - clusterInfo := &cephclient.ClusterInfo{Namespace: "myns"} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("Command: %s %v", command, args) - if args[1] == "crush" && args[2] == "dump" { - return `{"types":[{"type_id": 0,"name": "osd"}],"buckets":[{"id": -1,"name":"default"},{"id": -2,"name":"good"}, {"id": -3,"name":"host"}]}`, nil - } - return "", errors.Errorf("unexpected ceph command %q", args) - } - - // succeed with a failure domain that exists - p := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}, - Spec: cephv1.PoolSpec{ - Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}, - }, - } - clusterSpec := &cephv1.ClusterSpec{} - - err := ValidatePool(context, clusterInfo, clusterSpec, p) - assert.Nil(t, err) - - // fail with a failure domain that doesn't exist - p.Spec.FailureDomain = "doesntexist" - err = ValidatePool(context, clusterInfo, clusterSpec, p) - assert.NotNil(t, err) - - // fail with a crush root that doesn't exist - p.Spec.FailureDomain = "osd" - p.Spec.CrushRoot = "bad" - err = ValidatePool(context, clusterInfo, clusterSpec, p) - assert.NotNil(t, err) - - // fail with a crush root that does exist - p.Spec.CrushRoot = "good" - err = ValidatePool(context, clusterInfo, clusterSpec, p) - assert.Nil(t, err) - - // Success replica size is 4 and replicasPerFailureDomain is 2 - p.Spec.Replicated.Size = 4 - p.Spec.Replicated.ReplicasPerFailureDomain = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, p) - assert.NoError(t, err) -} - -func TestValidateDeviceClasses(t *testing.T) { - testcases := []struct { - name string - primaryDeviceClassOutput string - secondaryDeviceClassOutput string - hybridStorageSpec *cephv1.HybridStorageSpec - isValidSpec bool - }{ - { - name: "valid hybridStorageSpec", - primaryDeviceClassOutput: "[0, 1, 2]", - secondaryDeviceClassOutput: "[3, 4, 5]", - hybridStorageSpec: &cephv1.HybridStorageSpec{ - PrimaryDeviceClass: "ssd", - SecondaryDeviceClass: "hdd", - }, - isValidSpec: true, - }, - { - name: "invalid hybridStorageSpec.PrimaryDeviceClass", - primaryDeviceClassOutput: "[]", - secondaryDeviceClassOutput: "[3, 4, 5]", - hybridStorageSpec: &cephv1.HybridStorageSpec{ - 
PrimaryDeviceClass: "ssd", - SecondaryDeviceClass: "hdd", - }, - isValidSpec: false, - }, - { - name: "invalid hybridStorageSpec.SecondaryDeviceClass", - primaryDeviceClassOutput: "[0, 1, 2]", - secondaryDeviceClassOutput: "[]", - hybridStorageSpec: &cephv1.HybridStorageSpec{ - PrimaryDeviceClass: "ssd", - SecondaryDeviceClass: "hdd", - }, - isValidSpec: false, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - clusterInfo := &cephclient.ClusterInfo{Namespace: "myns"} - executor := &exectest.MockExecutor{} - context := &clusterd.Context{Executor: executor} - executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { - logger.Infof("ExecuteCommandWithOutputFile: %s %v", command, args) - if args[1] == "crush" && args[2] == "class" && args[3] == "ls-osd" && args[4] == "ssd" { - // Mock executor for `ceph osd crush class ls-osd ssd` - return tc.primaryDeviceClassOutput, nil - } else if args[1] == "crush" && args[2] == "class" && args[3] == "ls-osd" && args[4] == "hdd" { - // Mock executor for `ceph osd crush class ls-osd hdd` - return tc.secondaryDeviceClassOutput, nil - } - return "", nil - } - - p := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}, - Spec: cephv1.PoolSpec{ - Replicated: cephv1.ReplicatedSpec{ - HybridStorage: tc.hybridStorageSpec, - }, - }, - } - - err := validateDeviceClasses(context, clusterInfo, &p.Spec) - if tc.isValidSpec { - assert.NoError(t, err) - } else { - assert.Error(t, err) - } - }) - } -} diff --git a/pkg/operator/ceph/provisioner/provisioner.go b/pkg/operator/ceph/provisioner/provisioner.go deleted file mode 100644 index 3caac0a69..000000000 --- a/pkg/operator/ceph/provisioner/provisioner.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package provisioner to provision Rook volumes on Kubernetes. -package provisioner - -import ( - "context" - "fmt" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume" - ceph "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" -) - -const ( - storageClassBetaAnnotationKey = "volume.beta.kubernetes.io/storage-class" - sizeMB = 1048576 // 1 MB -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-provisioner") - -// RookVolumeProvisioner is used to provision Rook volumes on Kubernetes -type RookVolumeProvisioner struct { - context *clusterd.Context - - // The flex driver vendor dir to use - flexDriverVendor string -} - -type provisionerConfig struct { - // Required: The pool name to provision volumes from. - blockPool string - - // Optional: Name of the cluster. 
Default is `rook` - clusterNamespace string - - // Optional: File system type used for mounting the image. Default is `ext4` - fstype string - - // Optional: For erasure coded pools the data pool must be given - dataBlockPool string -} - -// New creates RookVolumeProvisioner -func New(context *clusterd.Context, flexDriverVendor string) controller.Provisioner { - return &RookVolumeProvisioner{ - context: context, - flexDriverVendor: flexDriverVendor, - } -} - -// Provision creates a storage asset and returns a PV object representing it. -func (p *RookVolumeProvisioner) Provision(_ context.Context, options controller.ProvisionOptions) (*v1.PersistentVolume, controller.ProvisioningState, error) { - - var err error - if options.PVC.Spec.Selector != nil { - return nil, controller.ProvisioningFinished, errors.New("claim Selector is not supported") - } - - cfg, err := parseClassParameters(options.StorageClass.Parameters) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - logger.Infof("creating volume with configuration %+v", *cfg) - - capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - requestBytes := capacity.Value() - - imageName := options.PVName - - storageClass, err := parseStorageClass(options) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - clusterInfo := ceph.AdminClusterInfo(cfg.clusterNamespace) - blockImage, err := p.createVolume(clusterInfo, imageName, cfg.blockPool, cfg.dataBlockPool, requestBytes) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - // the size of the PV needs to be at least as large as the size in the PVC - // or binding won't be successful. createVolume uses the requestBytes - // parameter as a target, and guarantees that the size created as at least - // that large. the adjusted value is placed in blockImage.Size and it is - // suitable to be converted into Mi. 
- // - // note that the rounding error that can occur if the original non-adjusted - // request is used in the original formulation here: - // - // s := fmt.Sprintf("%dMi", blockImage.Size/sizeMB) - // Size = 500M = 500,000,000 bytes - // 500M / 2**20 = 476 - // 476Mi = 476 * 2**20 = 499122176 < 500M - // - s := fmt.Sprintf("%dMi", blockImage.Size/sizeMB) - quantity, err := resource.ParseQuantity(s) - if err != nil { - return nil, controller.ProvisioningFinished, errors.Wrapf(err, "cannot parse %q", s) - } - - driverName, err := flexvolume.RookDriverName(p.context) - if err != nil { - return nil, controller.ProvisioningFinished, errors.Wrap(err, "failed to get driver name") - } - - flexdriver := fmt.Sprintf("%s/%s", p.flexDriverVendor, driverName) - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: imageName, - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: *options.StorageClass.ReclaimPolicy, - AccessModes: options.PVC.Spec.AccessModes, - Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): quantity, - }, - PersistentVolumeSource: v1.PersistentVolumeSource{ - FlexVolume: &v1.FlexPersistentVolumeSource{ - Driver: flexdriver, - FSType: cfg.fstype, - Options: map[string]string{ - flexvolume.StorageClassKey: storageClass, - flexvolume.PoolKey: cfg.blockPool, - flexvolume.ImageKey: imageName, - flexvolume.ClusterNamespaceKey: cfg.clusterNamespace, - flexvolume.DataBlockPoolKey: cfg.dataBlockPool, - }, - }, - }, - }, - } - logger.Infof("successfully created Rook Block volume %+v", pv.Spec.PersistentVolumeSource.FlexVolume) - return pv, controller.ProvisioningFinished, nil -} - -// createVolume creates a rook block volume. -func (p *RookVolumeProvisioner) createVolume(clusterInfo *ceph.ClusterInfo, image, pool, dataPool string, size int64) (*ceph.CephBlockImage, error) { - if image == "" || pool == "" || clusterInfo.Namespace == "" || size == 0 { - return nil, errors.Errorf("image missing required fields (image=%s, pool=%s, clusterNamespace=%s, size=%d)", image, pool, clusterInfo.Namespace, size) - } - - createdImage, err := ceph.CreateImage(p.context, clusterInfo, image, pool, dataPool, uint64(size)) - if err != nil { - return nil, errors.Wrapf(err, "failed to create rook block image %s/%s", pool, image) - } - logger.Infof("Rook block image created: %s, size = %d", createdImage.Name, createdImage.Size) - - return createdImage, nil -} - -// Delete removes the storage asset that was created by Provision represented -// by the given PV. -// Right now, we are not using the 'context.Context' argument, so ignoring it. 
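// A minimal, runnable sketch of the rounding pitfall described in the Provision
// comment above: converting the raw PVC request to Mi with integer division by
// 2^20 can round down below the requested size, which is why the conversion uses
// the adjusted size reported back by image creation. The numbers mirror the
// comment; "adjustedSize" is only an illustrative stand-in for blockImage.Size.
package main

import "fmt"

const sizeMB = 1 << 20 // 1 MiB in bytes

func main() {
	requestBytes := int64(500000000) // PVC requests 500M

	// Naive conversion of the raw request rounds down and under-provisions:
	naive := requestBytes / sizeMB
	fmt.Printf("%dMi = %d bytes (< %d requested)\n", naive, naive*sizeMB, requestBytes)
	// prints: 476Mi = 499122176 bytes (< 500000000 requested)

	// Converting a size that was already rounded up to at least the request
	// (a hypothetical adjusted value) stays at or above the request:
	adjustedSize := int64(477) * sizeMB
	fmt.Printf("%dMi = %d bytes (>= %d requested)\n", adjustedSize/sizeMB, adjustedSize, requestBytes)
	// prints: 477Mi = 500170752 bytes (>= 500000000 requested)
}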
-func (p *RookVolumeProvisioner) Delete(_ context.Context, volume *v1.PersistentVolume) error { - logger.Infof("Deleting volume %s", volume.Name) - if volume.Spec.PersistentVolumeSource.FlexVolume == nil { - return errors.Errorf("Failed to delete rook block image %s: %s", volume.Name, "PersistentVolume is not a FlexVolume") - } - if volume.Spec.PersistentVolumeSource.FlexVolume.Options == nil { - return errors.Errorf("Failed to delete rook block image %s: %s", volume.Name, "PersistentVolume has no image defined for the FlexVolume") - } - name := volume.Spec.PersistentVolumeSource.FlexVolume.Options[flexvolume.ImageKey] - pool := volume.Spec.PersistentVolumeSource.FlexVolume.Options[flexvolume.PoolKey] - var clusterns string - if _, ok := volume.Spec.PersistentVolumeSource.FlexVolume.Options[flexvolume.ClusterNamespaceKey]; ok { - clusterns = volume.Spec.PersistentVolumeSource.FlexVolume.Options[flexvolume.ClusterNamespaceKey] - } else if _, ok := volume.Spec.PersistentVolumeSource.FlexVolume.Options[flexvolume.ClusterNameKey]; ok { - // Fallback to `clusterName` as it was used in Rook version earlier v0.8 - clusterns = volume.Spec.PersistentVolumeSource.FlexVolume.Options[flexvolume.ClusterNameKey] - } - if clusterns == "" { - return errors.Errorf("failed to delete rook block image %s/%s: no clusterNamespace or (deprecated) clusterName option given", pool, volume.Name) - } - clusterInfo := ceph.AdminClusterInfo(clusterns) - err := ceph.DeleteImage(p.context, clusterInfo, name, pool) - if err != nil { - return errors.Wrapf(err, "failed to delete rook block image %s/%s", pool, volume.Name) - } - logger.Infof("succeeded deleting volume %+v", volume) - return nil -} - -func parseStorageClass(options controller.ProvisionOptions) (string, error) { - if options.PVC.Spec.StorageClassName != nil { - return *options.PVC.Spec.StorageClassName, nil - } - - // PVC manifest is from 1.5. Check annotation. - if val, ok := options.PVC.Annotations[storageClassBetaAnnotationKey]; ok { - return val, nil - } - - return "", errors.Errorf("failed to get storageclass from PVC %s/%s", options.PVC.Namespace, options.PVC.Name) -} - -func parseClassParameters(params map[string]string) (*provisionerConfig, error) { - var cfg provisionerConfig - - for k, v := range params { - switch strings.ToLower(k) { - case "pool": - cfg.blockPool = v - case "blockpool": - cfg.blockPool = v - case "clusternamespace": - cfg.clusterNamespace = v - case "clustername": - cfg.clusterNamespace = v - case "fstype": - cfg.fstype = v - case "datablockpool": - cfg.dataBlockPool = v - default: - return nil, errors.Errorf("invalid option %q for volume plugin %s", k, "rookVolumeProvisioner") - } - } - - if len(cfg.blockPool) == 0 { - return nil, errors.Errorf("StorageClass for provisioner %s must contain 'blockPool' parameter", "rookVolumeProvisioner") - } - - if len(cfg.clusterNamespace) == 0 { - cfg.clusterNamespace = cluster.DefaultClusterName - } - - return &cfg, nil -} diff --git a/pkg/operator/ceph/provisioner/provisioner_test.go b/pkg/operator/ceph/provisioner/provisioner_test.go deleted file mode 100644 index ba5ecfa1b..000000000 --- a/pkg/operator/ceph/provisioner/provisioner_test.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*/ - -package provisioner - -import ( - "context" - "io/ioutil" - "os" - "path" - "strings" - "testing" - - "github.com/rook/rook/pkg/clusterd" - clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" - "github.com/rook/rook/pkg/operator/test" - exectest "github.com/rook/rook/pkg/util/exec/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - storage "k8s.io/api/storage/v1" - storagebeta "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" -) - -func TestProvisionImage(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - namespace := "ns" - configDir, _ := ioutil.TempDir("", "") - os.Setenv("POD_NAMESPACE", "rook-ceph") - defer os.Setenv("POD_NAMESPACE", "") - defer os.RemoveAll(configDir) - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, namespace)) - assert.Nil(t, err) - } - - if command == "rbd" && args[0] == "create" { - return `[{"image":"pvc-uid-1-1","size":1048576,"format":2}]`, nil - } - - if command == "rbd" && args[0] == "info" { - assert.Equal(t, "testpool/pvc-uid-1-1", args[1]) - return `{"name":"pvc-uid-1-1","size":1048576,"objects":1,"order":20,"object_size":1048576,"block_name_prefix":"testpool_data.229226b8b4567",` + - `"format":2,"features":["layering"],"op_features":[],"flags":[],"create_timestamp":"Fri Oct 5 19:46:20 2018"}`, nil - } - return "", nil - }, - } - - context := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - ConfigDir: configDir, - } - - provisioner := New(context, "foo.io") - volume := newProvisionOptions(newStorageClass("class-1", "foo.io/block", map[string]string{"pool": "testpool", "clusterNamespace": "testCluster", "fsType": "ext3", "dataBlockPool": ""}, v1.PersistentVolumeReclaimRetain), newClaim("claim-1", "uid-1-1", "class-1", "", "class-1", nil), v1.PersistentVolumeReclaimRetain) - - pv, ps, err := provisioner.Provision(ctx, volume) - assert.Nil(t, err) - - assert.Equal(t, controller.ProvisioningFinished, ps) - assert.Equal(t, "pvc-uid-1-1", pv.Name) - assert.NotNil(t, pv.Spec.PersistentVolumeSource.FlexVolume) - assert.Equal(t, v1.PersistentVolumeReclaimRetain, pv.Spec.PersistentVolumeReclaimPolicy) - assert.Equal(t, "foo.io/rook-ceph", pv.Spec.PersistentVolumeSource.FlexVolume.Driver) - assert.Equal(t, "ext3", pv.Spec.PersistentVolumeSource.FlexVolume.FSType) - assert.Equal(t, "testCluster", pv.Spec.PersistentVolumeSource.FlexVolume.Options["clusterNamespace"]) - assert.Equal(t, "class-1", pv.Spec.PersistentVolumeSource.FlexVolume.Options["storageClass"]) - assert.Equal(t, "testpool", pv.Spec.PersistentVolumeSource.FlexVolume.Options["pool"]) - assert.Equal(t, "pvc-uid-1-1", pv.Spec.PersistentVolumeSource.FlexVolume.Options["image"]) - assert.Equal(t, "", pv.Spec.PersistentVolumeSource.FlexVolume.Options["dataBlockPool"]) - - volume = 
newProvisionOptions(newStorageClass("class-1", "foo.io/block", map[string]string{"pool": "testpool", "clusterNamespace": "testCluster", "fsType": "ext3", "dataBlockPool": "iamdatapool"}, v1.PersistentVolumeReclaimRecycle), newClaim("claim-1", "uid-1-1", "class-1", "", "class-1", nil), v1.PersistentVolumeReclaimRecycle) - - pv, ps, err = provisioner.Provision(ctx, volume) - assert.Nil(t, err) - - assert.Equal(t, controller.ProvisioningFinished, ps) - assert.Equal(t, "pvc-uid-1-1", pv.Name) - assert.NotNil(t, pv.Spec.PersistentVolumeSource.FlexVolume) - assert.Equal(t, v1.PersistentVolumeReclaimRecycle, pv.Spec.PersistentVolumeReclaimPolicy) - assert.Equal(t, "foo.io/rook-ceph", pv.Spec.PersistentVolumeSource.FlexVolume.Driver) - assert.Equal(t, "ext3", pv.Spec.PersistentVolumeSource.FlexVolume.FSType) - assert.Equal(t, "testCluster", pv.Spec.PersistentVolumeSource.FlexVolume.Options["clusterNamespace"]) - assert.Equal(t, "class-1", pv.Spec.PersistentVolumeSource.FlexVolume.Options["storageClass"]) - assert.Equal(t, "testpool", pv.Spec.PersistentVolumeSource.FlexVolume.Options["pool"]) - assert.Equal(t, "pvc-uid-1-1", pv.Spec.PersistentVolumeSource.FlexVolume.Options["image"]) - assert.Equal(t, "iamdatapool", pv.Spec.PersistentVolumeSource.FlexVolume.Options["dataBlockPool"]) -} - -func TestReclaimPolicyForProvisionedImages(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - namespace := "ns" - configDir, _ := ioutil.TempDir("", "") - os.Setenv("POD_NAMESPACE", "rook-system") - defer os.Setenv("POD_NAMESPACE", "") - defer os.RemoveAll(configDir) - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if strings.Contains(command, "ceph-authtool") { - err := clienttest.CreateConfigDir(path.Join(configDir, namespace)) - assert.Nil(t, err) - } - - if command == "rbd" && args[0] == "create" { - return `[{"image":"pvc-uid-1-1","size":1048576,"format":2}]`, nil - } - - if command == "rbd" && args[0] == "info" { - assert.Equal(t, "testpool/pvc-uid-1-1", args[1]) - return `{"name":"pvc-uid-1-1","size":1048576,"objects":1,"order":20,"object_size":1048576,"block_name_prefix":"testpool_data.229226b8b4567",` + - `"format":2,"features":["layering"],"op_features":[],"flags":[],"create_timestamp":"Fri Oct 5 19:46:20 2018"}`, nil - } - return "", nil - }, - } - - context := &clusterd.Context{ - Clientset: clientset, - Executor: executor, - ConfigDir: configDir, - } - - provisioner := New(context, "foo.io") - for _, reclaimPolicy := range []v1.PersistentVolumeReclaimPolicy{v1.PersistentVolumeReclaimDelete, v1.PersistentVolumeReclaimRetain, v1.PersistentVolumeReclaimRecycle} { - volume := newProvisionOptions(newStorageClass("class-1", "foo.io/block", map[string]string{"pool": "testpool", "clusterNamespace": "testCluster", "fsType": "ext3", "dataBlockPool": "iamdatapool"}, reclaimPolicy), newClaim("claim-1", "uid-1-1", "class-1", "", "class-1", nil), reclaimPolicy) - pv, ps, err := provisioner.Provision(ctx, volume) - assert.Nil(t, err) - - assert.Equal(t, controller.ProvisioningFinished, ps) - assert.Equal(t, reclaimPolicy, pv.Spec.PersistentVolumeReclaimPolicy) - } -} - -func TestParseClassParameters(t *testing.T) { - cfg := make(map[string]string) - cfg["pool"] = "testPool" - cfg["clustername"] = "myname" - cfg["fstype"] = "ext4" - - provConfig, err := parseClassParameters(cfg) - assert.Nil(t, err) - - assert.Equal(t, "testPool", provConfig.blockPool) - assert.Equal(t, "myname", provConfig.clusterNamespace) - 
assert.Equal(t, "ext4", provConfig.fstype) -} - -func TestParseClassParametersDefault(t *testing.T) { - cfg := make(map[string]string) - cfg["blockPool"] = "testPool" - - provConfig, err := parseClassParameters(cfg) - assert.Nil(t, err) - - assert.Equal(t, "testPool", provConfig.blockPool) - assert.Equal(t, "rook-ceph", provConfig.clusterNamespace) - assert.Equal(t, "", provConfig.fstype) -} - -func TestParseClassParametersNoPool(t *testing.T) { - cfg := make(map[string]string) - cfg["clustername"] = "myname" - - _, err := parseClassParameters(cfg) - assert.EqualError(t, err, "StorageClass for provisioner rookVolumeProvisioner must contain 'blockPool' parameter") - -} - -func TestParseClassParametersInvalidOption(t *testing.T) { - cfg := make(map[string]string) - cfg["pool"] = "testPool" - cfg["foo"] = "bar" - - _, err := parseClassParameters(cfg) - assert.EqualError(t, err, "invalid option \"foo\" for volume plugin rookVolumeProvisioner") -} - -func newProvisionOptions(storageClass *storagebeta.StorageClass, claim *v1.PersistentVolumeClaim, reclaimPolicy v1.PersistentVolumeReclaimPolicy) controller.ProvisionOptions { - return controller.ProvisionOptions{ - StorageClass: &storage.StorageClass{ - ReclaimPolicy: &reclaimPolicy, - Parameters: storageClass.Parameters, - }, - PVName: "pvc-" + string(claim.ObjectMeta.UID), - PVC: claim, - } -} - -func newStorageClass(name, provisioner string, parameters map[string]string, reclaimPolicy v1.PersistentVolumeReclaimPolicy) *storagebeta.StorageClass { - return &storagebeta.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Provisioner: provisioner, - Parameters: parameters, - ReclaimPolicy: &reclaimPolicy, - } -} - -func newClaim(name, claimUID, provisioner, volumeName, storageclassName string, annotations map[string]string) *v1.PersistentVolumeClaim { - claim := &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: v1.NamespaceDefault, - UID: types.UID(claimUID), - ResourceVersion: "0", - SelfLink: "/api/v1/namespaces/" + v1.NamespaceDefault + "/persistentvolumeclaims/" + name, - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Mi"), - }, - }, - VolumeName: volumeName, - StorageClassName: &storageclassName, - }, - Status: v1.PersistentVolumeClaimStatus{ - Phase: v1.ClaimPending, - }, - } - for k, v := range annotations { - claim.Annotations[k] = v - } - return claim -} diff --git a/pkg/operator/ceph/reporting/reporting.go b/pkg/operator/ceph/reporting/reporting.go deleted file mode 100644 index 98ec711ab..000000000 --- a/pkg/operator/ceph/reporting/reporting.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Reporting focuses on reporting Events, Status Conditions, and the like to users. 
-package reporting - -import ( - "context" - "fmt" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/dependents" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/retry" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// ReportReconcileResult will report the result of an object's reconcile in 2 ways: -// 1. to the given logger -// 2. as an event on the object (via the given event recorder) -// The results of the object's reconcile should include the object, the reconcile response, and the -// error returned by the reconcile. -// The function is designed to return the appropriate values needed for the controller-runtime -// framework's Reconcile() method. -func ReportReconcileResult(logger *capnslog.PackageLogger, recorder *k8sutil.EventReporter, - obj client.Object, reconcileResponse reconcile.Result, err error, -) (reconcile.Result, error) { - kind := obj.GetObjectKind().GroupVersionKind().Kind - nsName := fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()) - - if err != nil { - // 1. log - logger.Errorf("failed to reconcile %s %q. %v", kind, nsName, err) - - // 2. event - recorder.ReportIfNotPresent(obj, corev1.EventTypeWarning, string(cephv1.ReconcileFailed), err.Error()) - - if !reconcileResponse.IsZero() { - // The framework will requeue immediately if there is an error. If we get an error with - // a non-empty reconcile response, just return the response with the error now logged as - // an event so that the framework can pause before the next reconcile per the response's - // intent. - return reconcileResponse, nil - } - } else { - successMsg := fmt.Sprintf("successfully configured %s %q", kind, nsName) - - // 1. log - logger.Debug(successMsg) - - // 2. event - recorder.ReportIfNotPresent(obj, corev1.EventTypeNormal, string(cephv1.ReconcileSucceeded), successMsg) - } - - return reconcileResponse, err -} - -// ReportDeletionBlockedDueToDependents reports that deletion of a Rook-Ceph object is blocked due -// to the given dependents in 3 ways: -// 1. to the given logger -// 2. as a condition on the object (added to the object's conditions list given) -// 3. as the returned error which should be included in the FailedReconcile message -func ReportDeletionBlockedDueToDependents( - logger *capnslog.PackageLogger, client client.Client, obj cephv1.StatusConditionGetter, deps *dependents.DependentList, -) error { - kind := obj.GetObjectKind().GroupVersionKind().Kind - nsName := types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.GetName(), - } - blockedMsg := deps.StringWithHeader("%s %q will not be deleted until all dependents are removed", kind, nsName.String()) - - // 1. log - logger.Info(blockedMsg) - - // 2. condition - blockedCond := dependents.DeletionBlockedDueToDependentsCondition(true, blockedMsg) - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if err := client.Get(context.TODO(), nsName, obj); err != nil { - return errors.Wrapf(err, "failed to get latest %s %q", kind, nsName.String()) - } - if err := UpdateStatusCondition(client, obj, blockedCond); err != nil { - return err - } - return nil - }) - if err != nil { - return errors.Wrapf(err, "on condition %s", blockedMsg) - } - - // 3. 
error for later FailedReconcile message - return errors.New(blockedMsg) -} - -// ReportDeletionNotBlockedDueToDependents reports that deletion of a Rook-Ceph object is proceeding -// and NOT blocked due to dependents in 3 ways: -// 1. to the given logger -// 2. as an event on the object (via the given event recorder) -// 3. as a condition on the object (added to the object's conditions list given) -func ReportDeletionNotBlockedDueToDependents( - logger *capnslog.PackageLogger, client client.Client, recorder *k8sutil.EventReporter, obj cephv1.StatusConditionGetter, -) { - kind := obj.GetObjectKind().GroupVersionKind().Kind - nsName := types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.GetName(), - } - safeMsg := fmt.Sprintf("%s %q can be deleted safely", kind, nsName.String()) - deletingMsg := fmt.Sprintf("deleting %s %q", kind, nsName.String()) - - // 1. log - logger.Infof("%s. %s", safeMsg, deletingMsg) - - // 2. event - recorder.ReportIfNotPresent(obj, corev1.EventTypeNormal, string(cephv1.DeletingReason), deletingMsg) - - // 3. condition - unblockedCond := dependents.DeletionBlockedDueToDependentsCondition(false, safeMsg) - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if err := client.Get(context.TODO(), nsName, obj); err != nil { - return errors.Wrapf(err, "failed to get latest %s %q", kind, nsName.String()) - } - if err := UpdateStatusCondition(client, obj, unblockedCond); err != nil { - return err - } - return nil - }) - if err != nil { - logger.Warningf("continuing deletion of %s %q without setting the condition. %v", kind, nsName.String(), err) - } -} diff --git a/pkg/operator/ceph/reporting/status.go b/pkg/operator/ceph/reporting/status.go deleted file mode 100644 index d0af107c9..000000000 --- a/pkg/operator/ceph/reporting/status.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package reporting - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// UpdateStatus updates an object with a given status. The object is updated with the latest version -// from the server on a successful update. -func UpdateStatus(client client.Client, obj client.Object) error { - nsName := types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.GetName(), - } - - // Try to update the status - err := client.Status().Update(context.Background(), obj) - // If the object doesn't exist yet, we need to initialize it - if kerrors.IsNotFound(err) { - err = client.Update(context.Background(), obj) - } - if err != nil { - return errors.Wrapf(err, "failed to update object %q status", nsName.String()) - } - - return nil -} - -// UpdateStatusCondition updates (or adds to) the status condition to the given object. 
The object -// is updated with the latest version from the server on a successful update. -func UpdateStatusCondition( - client client.Client, obj cephv1.StatusConditionGetter, newCond cephv1.Condition, -) error { - kind := obj.GetObjectKind().GroupVersionKind().Kind - nsName := fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()) - - cephv1.SetStatusCondition(obj.GetStatusConditions(), newCond) - if err := UpdateStatus(client, obj); err != nil { - return errors.Wrapf(err, "failed to update %s %q status condition %s=%s", kind, nsName, newCond.Type, newCond.Status) - } - - return nil -} diff --git a/pkg/operator/ceph/reporting/status_test.go b/pkg/operator/ceph/reporting/status_test.go deleted file mode 100644 index 522438c78..000000000 --- a/pkg/operator/ceph/reporting/status_test.go +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package reporting - -import ( - "context" - "fmt" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestUpdateStatus(t *testing.T) { - t.Run("status does not exist initially", func(t *testing.T) { - fakeObject := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "rook-ceph", - Finalizers: []string{}, - }, - // Status: unset - } - nsName := types.NamespacedName{ - Namespace: fakeObject.Namespace, - Name: fakeObject.Name, - } - - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, fakeObject) - // have to use deepcopy to ensure the tracker doesn't have the pointer for our test object - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(fakeObject.DeepCopy()).Build() - - // get version of the object in the fake object tracker - getObj := &cephv1.CephBlockPool{} - err := cl.Get(context.TODO(), nsName, getObj) - assert.NoError(t, err) - - objCpy := getObj.DeepCopy() - getObj.Status = &cephv1.CephBlockPoolStatus{ - Phase: cephv1.ConditionProgressing, - } - err = UpdateStatus(cl, getObj) - assert.NoError(t, err) - - updObj := &cephv1.CephBlockPool{} - err = cl.Get(context.TODO(), nsName, updObj) - assert.NoError(t, err) - - fmt.Println(objCpy) - }) - - fakeObject := &cephv1.CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "rook-ceph", - Finalizers: []string{}, - }, - Status: &cephv1.CephBlockPoolStatus{ - Phase: "", - }, - } - nsName := types.NamespacedName{ - Namespace: fakeObject.Namespace, - Name: fakeObject.Name, - } - - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, fakeObject) - // use deepcopy to ensure the tracker doesn't have the pointer for our test object - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(fakeObject.DeepCopy()).Build() - - // get version of the object in the fake 
object tracker - getObj := &cephv1.CephBlockPool{} - err := cl.Get(context.TODO(), nsName, getObj) - assert.NoError(t, err) - - getObj.Status.Phase = cephv1.ConditionReady - err = UpdateStatus(cl, getObj) - assert.NoError(t, err) - - err = cl.Get(context.TODO(), nsName, getObj) - assert.NoError(t, err) - assert.Equal(t, cephv1.ConditionReady, getObj.Status.Phase) -} - -func TestUpdateStatusCondition(t *testing.T) { - fakeObject := &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "rook-ceph", - Finalizers: []string{}, - }, - Status: &cephv1.ObjectStoreStatus{ - Phase: cephv1.ConditionDeleting, - }, - } - nsName := types.NamespacedName{ - Namespace: fakeObject.Namespace, - Name: fakeObject.Name, - } - - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, fakeObject) - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(fakeObject.DeepCopy()).Build() - - // get version of the object in the fake object tracker - getObj := &cephv1.CephObjectStore{} - err := cl.Get(context.TODO(), nsName, getObj) - assert.NoError(t, err) - assert.Zero(t, len(getObj.Status.Conditions)) - - t.Run("add new status", func(t *testing.T) { - getObj := &cephv1.CephObjectStore{} - err := cl.Get(context.TODO(), nsName, getObj) - assert.NoError(t, err) - - startCond := cephv1.Condition{ - Type: cephv1.ConditionDeletionIsBlocked, - Status: v1.ConditionTrue, // changed - Reason: "start", // changed - Message: "start", // changed - } - - err = UpdateStatusCondition(cl, getObj, startCond) - assert.NoError(t, err) - - err = cl.Get(context.TODO(), nsName, getObj) - assert.NoError(t, err) - cond := getObj.Status.Conditions[0] - assert.Equal(t, v1.ConditionTrue, cond.Status) - assert.Equal(t, cephv1.ConditionReason("start"), cond.Reason) - assert.Equal(t, "start", cond.Message) - }) - - t.Run("update status", func(t *testing.T) { - getObj := &cephv1.CephObjectStore{} - err := cl.Get(context.TODO(), nsName, getObj) - assert.NoError(t, err) - - updatedCond := cephv1.Condition{ - Type: cephv1.ConditionDeletionIsBlocked, - Status: v1.ConditionFalse, // changed - Reason: "update", // changed - Message: "update", // changed - } - - err = UpdateStatusCondition(cl, getObj, updatedCond) - assert.NoError(t, err) - - err = cl.Get(context.TODO(), nsName, getObj) - assert.NoError(t, err) - cond := getObj.Status.Conditions[0] - assert.Equal(t, v1.ConditionFalse, cond.Status) - assert.Equal(t, cephv1.ConditionReason("update"), cond.Reason) - assert.Equal(t, "update", cond.Message) - }) -} diff --git a/pkg/operator/ceph/server.go b/pkg/operator/ceph/server.go deleted file mode 100644 index 60ed2df63..000000000 --- a/pkg/operator/ceph/server.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package operator - -import ( - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" -) - -var ( - scheme = runtime.NewScheme() - resources = []webhook.Validator{&cephv1.CephCluster{}, &cephv1.CephBlockPool{}, &cephv1.CephObjectStore{}} -) - -const ( - // Default directory where TLS certs are stored - certDir = "/etc/webhook" - // Default port for server - port = 8079 -) - -// StartAdmissionController will start the server -func StartAdmissionController() error { - logger.Infof("starting the webhook for backend ceph") - err := cephv1.AddToScheme(scheme) - if err != nil { - return errors.Wrap(err, "failed to add to scheme") - } - opts := ctrl.Options{ - Scheme: scheme, - Port: port, - CertDir: certDir, - } - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opts) - if err != nil { - return errors.Wrap(err, "failed to create manager") - } - for _, resource := range resources { - err = ctrl.NewWebhookManagedBy(mgr).For(resource).Complete() - if err != nil { - return errors.Wrap(err, "failed to register webhooks") - } - } - logger.Info("starting webhook server") - err = mgr.Start(ctrl.SetupSignalHandler()) - if err != nil { - return errors.Wrap(err, "failed to start server") - } - - return nil -} diff --git a/pkg/operator/ceph/test/container.go b/pkg/operator/ceph/test/container.go deleted file mode 100644 index 58871e715..000000000 --- a/pkg/operator/ceph/test/container.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package test provides common resources useful for testing many Ceph daemons. This includes -// functions for testing that resources match what is expected. -package test - -import ( - "strings" - "testing" - - "github.com/coreos/pkg/capnslog" - optest "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - "k8s.io/api/core/v1" -) - -// ContainerTestDefinition defines which k8s container values to test and what those values should -// be. Any definition item may be nil, and a nil value will prevent the item from being tested in -// the container test. -type ContainerTestDefinition struct { - // Image is the name of the container image - Image *string - // Command is the container command - Command []string - // Args is a list of expected arguments in the same format as the expected arguments from - // the ArgumentsMatchExpected() function. - Args [][]string - // InOrderArgs is a map of argument position (int) to the argument itself (string). 
If the - // "third" arg must be exactly the third argument this should be: InOrderArgs[2]="third" - InOrderArgs map[int]string - // VolumeMountNames is a list of volume mount names which must be mounted in the container - VolumeMountNames []string - // EnvCount is the number of 'Env' variables the container should define - EnvCount *int - // Ports is a list of ports the container must define. This list is in order, and each port's - // 'ContainerPort' and 'Protocol' are tested for equality. - // Note: port's in general aren't order-dependent, but there is not yet a method to test for - // the existence of a port in a list of ports without caring about order. - Ports []v1.ContainerPort - // IsPrivileged tests if the container is privileged (true) or unprivileged (false) - IsPrivileged *bool -} - -// TestContainer tests that a container matches the container test definition. Moniker is a name -// given to the container for identifying it in tests. Cont is the container to be tested. -// Logger is the logger to output logs to. -func (d *ContainerTestDefinition) TestContainer( - t *testing.T, - moniker string, - cont *v1.Container, - logger *capnslog.PackageLogger, -) { - - if d.Image != nil { - assert.Equal(t, *d.Image, cont.Image) - } - logCommandWithArgs(moniker, cont.Command, cont.Args, logger) - if d.Command != nil { - assert.Equal(t, len(d.Command), len(cont.Command)) - assert.Equal(t, strings.Join(d.Command, " "), strings.Join(cont.Command, " ")) - } - if d.Args != nil { - assert.Nil(t, optest.ArgumentsMatchExpected(cont.Args, d.Args)) - } - if d.InOrderArgs != nil { - for argNum, arg := range d.InOrderArgs { - assert.Equal(t, cont.Args[argNum], arg) - } - } - if d.VolumeMountNames != nil { - assert.Equal(t, len(d.VolumeMountNames), len(cont.VolumeMounts)) - for _, n := range d.VolumeMountNames { - assert.Nil(t, optest.VolumeMountExists(n, cont.VolumeMounts)) - } - } - if d.EnvCount != nil { - assert.Equal(t, *d.EnvCount, len(cont.Env)) - } - if d.Ports != nil { - assert.Equal(t, len(d.Ports), len(cont.Ports)) - for i, p := range d.Ports { - assert.Equal(t, p.ContainerPort, cont.Ports[i].ContainerPort) - assert.Equal(t, p.Protocol, cont.Ports[i].Protocol) - } - } - if d.IsPrivileged != nil { - assert.Equal(t, *d.IsPrivileged, *cont.SecurityContext.Privileged) - } -} - -// logCommandWithArgs writes a command and its arguments to the logger with a moniker to identify it -func logCommandWithArgs(moniker string, command, args []string, logger *capnslog.PackageLogger) { - logger.Infof("%s command : %s %s", moniker, strings.Join(command, " "), strings.Join(args, " ")) -} diff --git a/pkg/operator/ceph/test/containers.go b/pkg/operator/ceph/test/containers.go deleted file mode 100644 index 093ff0ddc..000000000 --- a/pkg/operator/ceph/test/containers.go +++ /dev/null @@ -1,265 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package test - -import ( - "fmt" - "regexp" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" -) - -var requiredEnvVars = []string{ - "CONTAINER_IMAGE", "POD_NAME", "POD_NAMESPACE", "NODE_NAME", - "ROOK_CEPH_MON_HOST", "ROOK_CEPH_MON_INITIAL_MEMBERS", - "POD_CPU_LIMIT", "POD_MEMORY_LIMIT", "POD_MEMORY_REQUEST", - "POD_CPU_REQUEST", -} - -// A ContainersTester is a helper exposing methods for testing required Ceph specifications common -// for all Ceph containers. -type ContainersTester struct { - t *testing.T - containers []v1.Container -} - -// Containers creates a ContainersTester from a parent PodSpecTester. Because ContainersTester is -// intended to test the full list of containers (both init and run containers) in a PodSpec, this -// method is the only way of creating a ContainersTester. -func (ps *PodSpecTester) Containers() *ContainersTester { - return &ContainersTester{ - t: ps.t, - containers: allContainers(ps.spec), - } -} - -// AssertArgsContainCephRequirements asserts that all Ceph containers under test have the flags -// required for all Ceph containers. -func (ct *ContainersTester) AssertArgsContainCephRequirements() { - for _, c := range ct.containers { - if !isCephCommand(c.Command) { - continue // don't consider containers that aren't Ceph commands - } - requiredFlags := []string{ - "--log-to-stderr=true", - "--err-to-stderr=true", - "--mon-cluster-log-to-stderr=true", - "--log-stderr-prefix=debug ", - "--mon-host=$(ROOK_CEPH_MON_HOST)", - "--mon-initial-members=$(ROOK_CEPH_MON_INITIAL_MEMBERS)", - } - assert.Subset(ct.t, c.Args, requiredFlags, "required Ceph flags are not in container"+c.Name) - fsidPresent := false - for _, a := range c.Args { - if strings.HasPrefix(a, "--fsid=") { - fsidPresent = true - break - } - } - assert.True(ct.t, fsidPresent, "--fsid=XXXXXXXX is not present in container args:", c.Args) - } -} - -// RequireAdditionalEnvVars adds a list of environment variable names to the list of required -// variables for a single unit test (it does not persist between different tests). -// Usage: myPodTemplateSpecTester.Spec().Containers().RequireAdditionalEnvVars("I_AM", "REQUIRED") -func (*ContainersTester) RequireAdditionalEnvVars(varNames ...string) { - requiredEnvVars = append(requiredEnvVars, varNames...) -} - -// AssertEnvVarsContainCephRequirements asserts that all Ceph containers under test have the -// environment variables required for all Ceph containers. 
-func (ct *ContainersTester) AssertEnvVarsContainCephRequirements() { - for _, c := range ct.containers { - if !isCephCommand(c.Command) { - continue // don't consider containers that aren't Ceph commands - } - localcontainer := c - assert.Subset(ct.t, varNames(&localcontainer), requiredEnvVars) - for _, e := range c.Env { - // For the required env vars, make sure they are sourced as expected - switch e.Name { - case "CONTAINER_IMAGE": - assert.Equal(ct.t, c.Image, e.Value, - "CONTAINER_IMAGE env var does not have the appropriate source:", e) - case "POD_NAME": - assert.Equal(ct.t, "metadata.name", e.ValueFrom.FieldRef.FieldPath, - "POD_NAME env var does not have the appropriate source:", e) - case "POD_NAMESPACE": - assert.Equal(ct.t, "metadata.namespace", e.ValueFrom.FieldRef.FieldPath, - "POD_NAMESPACE env var does not have the appropriate source:", e) - case "NODE_NAME": - assert.Equal(ct.t, "spec.nodeName", e.ValueFrom.FieldRef.FieldPath, - "NODE_NAME env var does not have the appropriate source:", e) - case "ROOK_CEPH_MON_HOST": - assert.Equal(ct.t, "rook-ceph-config", e.ValueFrom.SecretKeyRef.LocalObjectReference.Name, - "ROOK_CEPH_MON_HOST env var does not have appropriate source:", e) - assert.Equal(ct.t, "mon_host", e.ValueFrom.SecretKeyRef.Key, - "ROOK_CEPH_MON_HOST env var does not have appropriate source:", e) - case "ROOK_CEPH_MON_INITIAL_MEMBERS": - assert.Equal(ct.t, "rook-ceph-config", e.ValueFrom.SecretKeyRef.LocalObjectReference.Name, - "ROOK_CEPH_MON_INITIAL_MEMBERS env var does not have appropriate source:", e) - assert.Equal(ct.t, "mon_initial_members", e.ValueFrom.SecretKeyRef.Key, - "ROOK_CEPH_MON_INITIAL_MEMBERS env var does not have appropriate source:", e) - case "POD_MEMORY_LIMIT": - assert.Equal(ct.t, "limits.memory", e.ValueFrom.ResourceFieldRef.Resource, - "POD_MEMORY_LIMIT env var does not have the appropriate source:", e) - case "POD_MEMORY_REQUEST": - assert.Equal(ct.t, "requests.memory", e.ValueFrom.ResourceFieldRef.Resource, - "POD_MEMORY_REQUEST env var does not have the appropriate source:", e) - case "POD_CPU_LIMIT": - assert.Equal(ct.t, "limits.cpu", e.ValueFrom.ResourceFieldRef.Resource, - "POD_CPU_LIMIT env var does not have the appropriate source:", e) - case "POD_CPU_REQUEST": - assert.Equal(ct.t, "requests.cpu", e.ValueFrom.ResourceFieldRef.Resource, - "POD_CPU_REQUEST env var does not have the appropriate source:", e) - } - } - vars := FindDuplicateEnvVars(c) - assert.Equal(ct.t, 0, len(vars), fmt.Sprintf("found duplicate env vars: %v", vars)) - } -} - -// AssertArgReferencesMatchEnvVars asserts that for each container under test, any references to -// Kubernetes environment variables (e.g., $(POD_NAME)), have an environment variable set to source -// the value. 
-func (ct *ContainersTester) AssertArgReferencesMatchEnvVars() { - for _, c := range ct.containers { - localcontainer := c - assert.Subset(ct.t, varNames(&localcontainer), argEnvReferences(&localcontainer), - "container: "+c.Name, - "references to env vars in args do not match env vars", - "args:", c.Args, "envs:", c.Env) - } - // also make sure there are no extraneous env vars - // the only allowed extraneous vars are the required vars - assert.ElementsMatch(ct.t, ct.allNonrequiredVarNames(), ct.allNonrequiredArgEnvReferences(), - "there are extra arguments or references which do not have a corresponding ref/arg", - fmt.Sprintf("%+v", ct.allNonrequiredVarNames()), - fmt.Sprintf("%+v", ct.allNonrequiredArgEnvReferences())) -} - -// AssertCephImagesMatch asserts that for all Ceph containers under test, the Ceph image used is the -// expected image. -func (ct *ContainersTester) AssertCephImagesMatch(image string) { - for _, c := range ct.containers { - if !isCephCommand(c.Command) { - continue // don't consider containers that aren't Ceph commands - } - assert.Equal(ct.t, image, c.Image, "Ceph image for container "+c.Name+"does not match expected") - } -} - -// RunFullSuite runs all assertion tests for the Containers under test. -func (ct *ContainersTester) RunFullSuite(cephImage, cpuResourceLimit, cpuResourceRequest, memoryResourceLimit, memoryResourceRequest string) { - ct.AssertEnvVarsContainCephRequirements() - ct.AssertArgReferencesMatchEnvVars() - ct.AssertArgsContainCephRequirements() - ct.AssertCephImagesMatch(cephImage) -} - -func isCephCommand(command []string) bool { - // assume a ceph command is identified by the existence of the word "ceph" somewhere in the - // first command word. - // Are Ceph commands: ["ceph-mon", ...], ["ceph-mgr", ...], ["ceph", "config", ...] - // Are not: ["cp", "/etc/ceph/...], ... - return strings.Contains(command[0], "ceph") -} - -func argEnvReferences(c *v1.Container) []string { - argRefSet := map[string]bool{} - for _, a := range c.Args { - argRefRegex, e := regexp.Compile(`\$\(([a-zA-Z][a-zA-Z0-9_]*)\)`) - if e != nil { - panic("could not compile argument reference regexp") - } - matches := argRefRegex.FindAllStringSubmatch(a, -1) - for _, m := range matches { - argRefSet[m[1]] = true - } - } - refs := []string{} - for r := range argRefSet { - refs = append(refs, r) - } - return refs -} - -func varNames(c *v1.Container) []string { - vars := []string{} - for _, v := range c.Env { - vars = append(vars, v.Name) - } - return vars -} - -func (ct *ContainersTester) allNonrequiredArgEnvReferences() []string { - allSet := map[string]bool{} - for _, c := range ct.containers { - localcontainer := c - for _, r := range argEnvReferences(&localcontainer) { - allSet[r] = true - } - } - for _, req := range requiredEnvVars { - allSet[req] = false // required env vars NOT required - } - all := []string{} - for r, req := range allSet { - if req { - all = append(all, r) - } - } - return all -} - -func (ct *ContainersTester) allNonrequiredVarNames() []string { - allSet := map[string]bool{} - for _, c := range ct.containers { - localcontainer := c - for _, v := range varNames(&localcontainer) { - allSet[v] = true - } - } - for _, req := range requiredEnvVars { - allSet[req] = false // required env vars NOT required - } - all := []string{} - for v, req := range allSet { - if req { - all = append(all, v) - } - } - return all -} - -// FindDuplicateEnvVars finds duplicated environment variables and return the variable name list. 
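// A small, self-contained sketch of what the argEnvReferences helper above
// extracts: environment variable names referenced as $(VAR) in container args,
// which AssertArgReferencesMatchEnvVars then compares against the container's
// declared env vars. The args slice below is illustrative only.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	argRefRegex := regexp.MustCompile(`\$\(([a-zA-Z][a-zA-Z0-9_]*)\)`)
	args := []string{"--mon-host=$(ROOK_CEPH_MON_HOST)", "--id=$(POD_NAME)", "--log-to-stderr=true"}

	refs := []string{}
	for _, a := range args {
		for _, m := range argRefRegex.FindAllStringSubmatch(a, -1) {
			refs = append(refs, m[1]) // capture group 1 is the referenced env var name
		}
	}
	fmt.Println(refs) // [ROOK_CEPH_MON_HOST POD_NAME]
}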
-func FindDuplicateEnvVars(container v1.Container) []string { - var duplicateEnvVars []string - envVars := map[string]string{} - for _, v := range container.Env { - _, ok := envVars[v.Name] - if ok { - duplicateEnvVars = append(duplicateEnvVars, v.Name) - } - envVars[v.Name] = v.Value - } - return duplicateEnvVars -} diff --git a/pkg/operator/ceph/test/podspec.go b/pkg/operator/ceph/test/podspec.go deleted file mode 100644 index daf7e2016..000000000 --- a/pkg/operator/ceph/test/podspec.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package test - -/* -The goal here is not to test every individual specification of the pod/container. Testing that the -generated pod spec has each piece set in the Rook code isn't a particularly effective use of unit -tests. Any time the Rook code changes the pod spec intentionally, the unit test changes in the -exact same way, which doesn't really help prevent against errors where devs are changing the wrong -spec values. - -Instead, the unit tests should focus on testing things that are universal truths about -Ceph pod specifications that can help catch when pods ... - - do not have the minimum requirements for running Ceph tools/daemons - - have vestigial values set that are no longer needed - - have references to absent resources (e.g., a volume mount without a volume source) - -In this way, unit tests for pod specifications can be consistent and shared between all Ceph pods -created by the Rook operator. With this consistency between unit tests, there should be increased -consistency between the Ceph pods that Rook creates, ensuring a consistent user experience -interacting with pods. -*/ - -import ( - "fmt" - "testing" - - "github.com/rook/rook/pkg/operator/ceph/config" - optest "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" -) - -// A PodSpecTester is a helper exposing methods for testing required Ceph specifications common for -// all Ceph PodSpecs. -type PodSpecTester struct { - t *testing.T - spec *v1.PodSpec -} - -// Spec creates a PodSpecTester from a parent PodTemplateSpecTester. -func (pt *PodTemplateSpecTester) Spec() *PodSpecTester { - return NewPodSpecTester(pt.t, &pt.template.Spec) -} - -// NewPodSpecTester creates a new tester to test the given PodSpec. -func NewPodSpecTester(t *testing.T, spec *v1.PodSpec) *PodSpecTester { - return &PodSpecTester{t: t, spec: spec} -} - -// AssertVolumesMeetCephRequirements asserts that all the required Ceph volumes exist in the pod -// spec under test, Volumes list. 
-func (ps *PodSpecTester) AssertVolumesMeetCephRequirements(
-	daemonType, daemonID string,
-) {
-	// #nosec because of the word `Secret`
-	keyringSecretName := fmt.Sprintf("rook-ceph-%s-%s-keyring", daemonType, daemonID)
-	if daemonType == config.MonType {
-		// #nosec because of the word `Secret`
-		keyringSecretName = "rook-ceph-mons-keyring"
-	}
-	// CephFS mirror has no index so the daemon name is just "rook-ceph-fs-mirror"
-	if daemonType == config.FilesystemMirrorType {
-		keyringSecretName = fmt.Sprintf("rook-ceph-%s-keyring", daemonType)
-	}
-	requiredVols := []string{"rook-config-override", keyringSecretName}
-	if daemonType != config.RbdMirrorType && daemonType != config.FilesystemMirrorType {
-		requiredVols = append(requiredVols, "ceph-daemon-data")
-	}
-	vols := []string{}
-
-	for _, v := range ps.spec.Volumes {
-		vols = append(vols, v.Name)
-		switch v.Name {
-		case "ceph-daemon-data":
-			switch daemonType {
-			case config.MonType:
-				// mons may be host path or pvc
-				assert.True(ps.t,
-					v.VolumeSource.HostPath != nil || v.VolumeSource.PersistentVolumeClaim != nil,
-					string(daemonType)+" daemon should be host path or pvc:", v)
-			case config.OsdType:
-				// osds MUST be host path
-				assert.NotNil(ps.t, v.VolumeSource.HostPath,
-					string(daemonType)+" daemon should be host path:", v)
-			case config.MgrType, config.MdsType, config.RgwType:
-				// mgrs, mdses, and rgws MUST be empty dir
-				assert.NotNil(ps.t, v.VolumeSource.EmptyDir,
-					string(daemonType)+" daemon should be empty dir:", v)
-			}
-		case "rook-ceph-config":
-			assert.Equal(ps.t, "rook-ceph-config", v.VolumeSource.ConfigMap.LocalObjectReference.Name,
-				"Ceph config volume source is wrong path:", v)
-		case keyringSecretName:
-			assert.Equal(ps.t, keyringSecretName, v.VolumeSource.Secret.SecretName,
-				"daemon keyring volume source is wrong path:", v)
-		}
-	}
-	assert.Subset(ps.t, vols, requiredVols,
-		"required volumes don't exist in pod spec's volume list:", ps.spec.Volumes)
-}
-
-// AssertRestartPolicyAlways asserts that the pod spec's restart policy is set to Always.
-func (ps *PodSpecTester) AssertRestartPolicyAlways() {
-	assert.Equal(ps.t, v1.RestartPolicyAlways, ps.spec.RestartPolicy)
-}
-
-// AssertChownContainer ensures that the init container to chown the Ceph data dir is present for
-// Ceph daemons.
-func (ps *PodSpecTester) AssertChownContainer(daemonType string) {
-	switch daemonType {
-	case config.MonType, config.MgrType, config.OsdType, config.MdsType, config.RgwType, config.RbdMirrorType:
-		assert.True(ps.t, containerExists("chown-container-data-dir", ps.spec))
-	}
-}
-
-// AssertPriorityClassNameMatch asserts that the pod spec's priorityClassName is set to the given value.
-func (ps *PodSpecTester) AssertPriorityClassNameMatch(name string) {
-	assert.Equal(ps.t, name, ps.spec.PriorityClassName)
-}
-
-// RunFullSuite runs all assertion tests for the PodSpec under test and its sub-resources.
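// A hedged sketch of how a daemon package's unit test might drive the shared tester
// whose full suite is declared just below; the Ceph image, resource strings, and
// priority class here are placeholder values, and a real test would pass the PodSpec
// generated by the daemon's own spec builder rather than a zero value.
package mydaemon

import (
	"testing"

	cephtest "github.com/rook/rook/pkg/operator/ceph/test"
	v1 "k8s.io/api/core/v1"
)

func TestPodSpecMeetsCephRequirements(t *testing.T) {
	var spec v1.PodSpec // stand-in; shows only the call shape and would fail the assertions
	cephtest.NewPodSpecTester(t, &spec).RunFullSuite(
		"mon", "a", "quay.io/ceph/ceph:v16.2.6",
		"500", "250", "1Gi", "512Mi", "my-priority-class")
}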
-func (ps *PodSpecTester) RunFullSuite( - daemonType, resourceName, cephImage, - cpuResourceLimit, cpuResourceRequest, memoryResourceLimit, memoryResourceRequest string, priorityClassName string, -) { - resourceExpectations := optest.ResourceLimitExpectations{ - CPUResourceLimit: cpuResourceLimit, - MemoryResourceLimit: memoryResourceLimit, - CPUResourceRequest: cpuResourceRequest, - MemoryResourceRequest: memoryResourceRequest, - } - ops := optest.NewPodSpecTester(ps.t, ps.spec) - ops.RunFullSuite(resourceExpectations) - - ps.AssertVolumesMeetCephRequirements(daemonType, resourceName) - ps.AssertRestartPolicyAlways() - ps.AssertChownContainer(daemonType) - ps.AssertPriorityClassNameMatch(priorityClassName) - ps.Containers().RunFullSuite(cephImage, cpuResourceLimit, cpuResourceRequest, memoryResourceLimit, memoryResourceRequest) -} - -func allContainers(p *v1.PodSpec) []v1.Container { - return append(p.InitContainers, p.Containers...) -} - -func containerExists(containerName string, p *v1.PodSpec) bool { - for _, c := range p.InitContainers { - if c.Name == containerName { - return true - } - } - return false -} diff --git a/pkg/operator/ceph/test/podtemplatespec.go b/pkg/operator/ceph/test/podtemplatespec.go deleted file mode 100644 index e3f97f815..000000000 --- a/pkg/operator/ceph/test/podtemplatespec.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package test - -import ( - "testing" - - v1 "k8s.io/api/core/v1" -) - -// A PodTemplateSpecTester is a helper exposing methods for testing required Ceph specifications -// common for all Ceph PodTemplateSpecs. -type PodTemplateSpecTester struct { - t *testing.T - template *v1.PodTemplateSpec -} - -// NewPodTemplateSpecTester creates a new tester to test the given PodTemplateSpec -func NewPodTemplateSpecTester(t *testing.T, template *v1.PodTemplateSpec) *PodTemplateSpecTester { - return &PodTemplateSpecTester{t: t, template: template} -} - -// AssertLabelsContainCephRequirements asserts that the PodTemplateSpec under test contains labels -// which all Ceph pods should have. -func (pt *PodTemplateSpecTester) AssertLabelsContainCephRequirements( - daemonType, daemonID, appName, namespace string, -) { - AssertLabelsContainCephRequirements(pt.t, pt.template.ObjectMeta.Labels, - daemonType, daemonID, appName, namespace) -} - -// RunFullSuite runs all assertion tests for the PodTemplateSpec under test and its sub-resources. 
-func (pt *PodTemplateSpecTester) RunFullSuite( - daemonType, daemonID, appName, namespace, cephImage, - cpuResourceLimit, cpuResourceRequest, - memoryResourceLimit, memoryResourceRequest string, - priorityClassName string, -) { - pt.AssertLabelsContainCephRequirements(daemonType, daemonID, appName, namespace) - pt.Spec().RunFullSuite(daemonType, daemonID, cephImage, cpuResourceLimit, cpuResourceRequest, memoryResourceLimit, memoryResourceRequest, priorityClassName) -} diff --git a/pkg/operator/ceph/test/spec.go b/pkg/operator/ceph/test/spec.go deleted file mode 100644 index f07eed51c..000000000 --- a/pkg/operator/ceph/test/spec.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package test - -import ( - "fmt" - "strings" - "testing" - - e "github.com/pkg/errors" - "github.com/rook/rook/pkg/operator/ceph/controller" - optest "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" -) - -func checkLabel(key, value string, labels map[string]string) error { - v, ok := labels[key] - if !ok { - return e.Errorf("label not present: expected={%s: %s}", key, value) - } - if v != value { - return e.Errorf("label mismatch: expected={%s: %s} present={%s: %s}", key, value, key, v) - } - return nil -} - -func combineErrors(errors ...error) error { - errText := "" - failure := false - for _, e := range errors { - if e != nil { - failure = true - errText = fmt.Sprintf("%v: %s", e, errText) // Will result in string ending in ": " - } - } - if failure { - errText = strings.TrimRight(errText, ": ") // Remove ": " from end - return e.Errorf("%s", errText) - } - return nil -} - -// VerifyAppLabels returns a descriptive error if app labels are not present or not as expected. -func VerifyAppLabels(appName, namespace string, labels map[string]string) error { - errA := checkLabel("app", appName, labels) - errB := checkLabel("rook_cluster", namespace, labels) - return combineErrors(errA, errB) -} - -// VerifyPodLabels returns a descriptive error if pod labels are not present or not as expected. -func VerifyPodLabels(appName, namespace, daemonType, daemonID string, labels map[string]string) error { - errA := VerifyAppLabels(appName, namespace, labels) - errB := checkLabel(controller.DaemonIDLabel, daemonID, labels) - errC := checkLabel(daemonType, daemonID, labels) - return combineErrors(errA, errB, errC) -} - -// AssertLabelsContainCephRequirements asserts that the the labels under test contain the labels -// which all Ceph pods should have. This can be used with labels for Kubernetes Deployments, -// DaemonSets, etc. 
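// A minimal sketch of the label verification helpers above: VerifyPodLabels folds every
// mismatch into one combined error, so a single call reports all missing or wrong labels
// at once. The label values are illustrative; "ceph_daemon_id" matches the daemon ID
// label this package expects on Ceph pods.
package main

import (
	"fmt"

	cephtest "github.com/rook/rook/pkg/operator/ceph/test"
)

func main() {
	labels := map[string]string{
		"app":            "rook-ceph-mon",
		"rook_cluster":   "rook-ceph",
		"ceph_daemon_id": "a",
		"mon":            "a",
	}
	// all required labels present: prints <nil>
	fmt.Println(cephtest.VerifyPodLabels("rook-ceph-mon", "rook-ceph", "mon", "a", labels))

	delete(labels, "rook_cluster")
	// now prints an error naming the missing rook_cluster label
	fmt.Println(cephtest.VerifyPodLabels("rook-ceph-mon", "rook-ceph", "mon", "a", labels))
}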
-func AssertLabelsContainCephRequirements( - t *testing.T, labels map[string]string, - daemonType, daemonID, appName, namespace string, -) { - optest.AssertLabelsContainRookRequirements(t, labels, appName) - - resourceLabels := []string{} - for k, v := range labels { - resourceLabels = append(resourceLabels, fmt.Sprintf("%s=%s", k, v)) - } - expectedLabels := []string{ - "ceph_daemon_id=" + daemonID, - string(daemonType) + "=" + daemonID, - "rook_cluster" + "=" + namespace, - } - assert.Subset(t, resourceLabels, expectedLabels, - "labels on resource do not match Ceph requirements", labels) -} diff --git a/pkg/operator/ceph/version/version.go b/pkg/operator/ceph/version/version.go deleted file mode 100644 index fd1336061..000000000 --- a/pkg/operator/ceph/version/version.go +++ /dev/null @@ -1,352 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version - -import ( - "fmt" - "regexp" - "strconv" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" -) - -// CephVersion represents the Ceph version format -type CephVersion struct { - Major int - Minor int - Extra int - Build int - CommitID string -} - -const ( - unknownVersionString = "" -) - -var ( - // Minimum supported version is 14.2.5 - Minimum = CephVersion{14, 2, 5, 0, ""} - // Nautilus Ceph version - Nautilus = CephVersion{14, 0, 0, 0, ""} - // Octopus Ceph version - Octopus = CephVersion{15, 0, 0, 0, ""} - // Pacific Ceph version - Pacific = CephVersion{16, 0, 0, 0, ""} - // Quincy Ceph version - Quincy = CephVersion{17, 0, 0, 0, ""} - - // cephVolumeLVMDiskSortingCephVersion introduced a major regression in c-v and thus is not suitable for production - cephVolumeLVMDiskSortingCephVersion = CephVersion{Major: 14, Minor: 2, Extra: 13} - - // supportedVersions are production-ready versions that rook supports - supportedVersions = []CephVersion{Nautilus, Octopus, Pacific} - - // unsupportedVersions are possibly Ceph pin-point release that introduced breaking changes and not recommended - unsupportedVersions = []CephVersion{cephVolumeLVMDiskSortingCephVersion} - - // for parsing the output of `ceph --version` - versionPattern = regexp.MustCompile(`ceph version (\d+)\.(\d+)\.(\d+)`) - - // For a build release the output is "ceph version 14.2.4-64.el8cp" - // So we need to detect the build version change - buildVersionPattern = regexp.MustCompile(`ceph version (\d+)\.(\d+)\.(\d+)\-(\d+)`) - - // for parsing the commit hash in the ceph --version output. 
For example: - // input = `ceph version 14.2.11-139 (5c0dc966af809fd1d429ec7bac48962a746af243) nautilus (stable)` - // output = [(5c0dc966af809fd1d429ec7bac48962a746af243) 5c0dc966af809fd1d429ec7bac48962a746af243] - commitIDPattern = regexp.MustCompile(`\(([^)]+)\)`) - - logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephver") -) - -func (v *CephVersion) String() string { - return fmt.Sprintf("%d.%d.%d-%d %s", - v.Major, v.Minor, v.Extra, v.Build, v.ReleaseName()) -} - -// CephVersionFormatted returns the Ceph version in a human readable format -func (v *CephVersion) CephVersionFormatted() string { - return fmt.Sprintf("ceph version %d.%d.%d-%d %s", - v.Major, v.Minor, v.Extra, v.Build, v.ReleaseName()) -} - -// ReleaseName is the name of the Ceph release -func (v *CephVersion) ReleaseName() string { - switch v.Major { - case Nautilus.Major: - return "nautilus" - case Octopus.Major: - return "octopus" - case Pacific.Major: - return "pacific" - case Quincy.Major: - return "quincy" - default: - return unknownVersionString - } -} - -// ExtractCephVersion extracts the major, minor and extra digit of a Ceph release -func ExtractCephVersion(src string) (*CephVersion, error) { - var build int - var commitID string - versionMatch := versionPattern.FindStringSubmatch(src) - if versionMatch == nil { - return nil, errors.Errorf("failed to parse version from: %q", src) - } - - major, err := strconv.Atoi(versionMatch[1]) - if err != nil { - return nil, errors.Errorf("failed to parse version major part: %q", versionMatch[1]) - } - - minor, err := strconv.Atoi(versionMatch[2]) - if err != nil { - return nil, errors.Errorf("failed to parse version minor part: %q", versionMatch[2]) - } - - extra, err := strconv.Atoi(versionMatch[3]) - if err != nil { - return nil, errors.Errorf("failed to parse version extra part: %q", versionMatch[3]) - } - - // See if we are running on a build release - buildVersionMatch := buildVersionPattern.FindStringSubmatch(src) - // We don't need to handle any error here, so let's jump in only when "mm" has content - if buildVersionMatch != nil { - build, err = strconv.Atoi(buildVersionMatch[4]) - if err != nil { - logger.Warningf("failed to convert version build number part %q to an integer, ignoring", buildVersionMatch[4]) - } - } - - commitIDMatch := commitIDPattern.FindStringSubmatch(src) - if commitIDMatch != nil { - commitID = commitIDMatch[1] - } - - return &CephVersion{major, minor, extra, build, commitID}, nil -} - -// Supported checks if a given release is supported -func (v *CephVersion) Supported() bool { - for _, sv := range supportedVersions { - if v.isRelease(sv) { - return true - } - } - return false -} - -// Unsupported checks if a given release is supported -func (v *CephVersion) Unsupported() bool { - for _, sv := range unsupportedVersions { - if v.isExactly(sv) { - return true - } - } - return false -} - -func (v *CephVersion) isRelease(other CephVersion) bool { - return v.Major == other.Major -} - -func (v *CephVersion) isExactly(other CephVersion) bool { - return v.Major == other.Major && v.Minor == other.Minor && v.Extra == other.Extra -} - -// IsNautilus checks if the Ceph version is Nautilus -func (v *CephVersion) IsNautilus() bool { - return v.isRelease(Nautilus) -} - -// IsOctopus checks if the Ceph version is Octopus -func (v *CephVersion) IsOctopus() bool { - return v.isRelease(Octopus) -} - -// IsPacific checks if the Ceph version is Pacific -func (v *CephVersion) IsPacific() bool { - return v.isRelease(Pacific) -} - -// IsQuincy checks if 
the Ceph version is Quincy -func (v *CephVersion) IsQuincy() bool { - return v.isRelease(Quincy) -} - -// IsAtLeast checks a given Ceph version is at least a given one -func (v *CephVersion) IsAtLeast(other CephVersion) bool { - if v.Major > other.Major { - return true - } else if v.Major < other.Major { - return false - } - // If we arrive here then v.Major == other.Major - if v.Minor > other.Minor { - return true - } else if v.Minor < other.Minor { - return false - } - // If we arrive here then v.Minor == other.Minor - if v.Extra > other.Extra { - return true - } else if v.Extra < other.Extra { - return false - } - // If we arrive here then both versions are identical - return true -} - -// IsAtLeastQuincy check that the Ceph version is at least Quincy -func (v *CephVersion) IsAtLeastQuincy() bool { - return v.IsAtLeast(Quincy) -} - -// IsAtLeastPacific check that the Ceph version is at least Pacific -func (v *CephVersion) IsAtLeastPacific() bool { - return v.IsAtLeast(Pacific) -} - -// IsAtLeastOctopus check that the Ceph version is at least Octopus -func (v *CephVersion) IsAtLeastOctopus() bool { - return v.IsAtLeast(Octopus) -} - -// IsAtLeastNautilus check that the Ceph version is at least Nautilus -func (v *CephVersion) IsAtLeastNautilus() bool { - return v.IsAtLeast(Nautilus) -} - -// IsIdentical checks if Ceph versions are identical -func IsIdentical(a, b CephVersion) bool { - if a.Major == b.Major { - if a.Minor == b.Minor { - if a.Extra == b.Extra { - if a.Build == b.Build { - if a.CommitID == b.CommitID { - return true - } - } - } - } - } - - return false -} - -// IsSuperior checks if a given version if superior to another one -func IsSuperior(a, b CephVersion) bool { - if a.Major > b.Major { - return true - } - if a.Major == b.Major { - if a.Minor > b.Minor { - return true - } - } - if a.Major == b.Major { - if a.Minor == b.Minor { - if a.Extra > b.Extra { - return true - } - } - } - if a.Major == b.Major { - if a.Minor == b.Minor { - if a.Extra == b.Extra { - if a.Build > b.Build { - return true - } - if a.CommitID != b.CommitID { - return true - } - } - } - } - - return false -} - -// IsInferior checks if a given version if inferior to another one -func IsInferior(a, b CephVersion) bool { - if a.Major < b.Major { - return true - } - if a.Major == b.Major { - if a.Minor < b.Minor { - return true - } - } - if a.Major == b.Major { - if a.Minor == b.Minor { - if a.Extra < b.Extra { - return true - } - } - } - if a.Major == b.Major { - if a.Minor == b.Minor { - if a.Extra == b.Extra { - if a.Build < b.Build { - return true - } - } - } - } - - return false -} - -// ValidateCephVersionsBetweenLocalAndExternalClusters makes sure an external cluster can be connected -// by checking the external ceph versions available and comparing it with the local image provided -func ValidateCephVersionsBetweenLocalAndExternalClusters(localVersion, externalVersion CephVersion) error { - logger.Debugf("local version is %q, external version is %q", localVersion.String(), externalVersion.String()) - - // We only support Nautilus or newer - if !externalVersion.IsAtLeastNautilus() { - return errors.Errorf("unsupported ceph version %q, need at least nautilus, delete your cluster CR and create a new one with a correct ceph version", externalVersion.String()) - } - - // Identical version, regardless if other CRs are running, it's ok! 
- if IsIdentical(localVersion, externalVersion) { - return nil - } - - // Local version must never be higher than the external one - if IsSuperior(localVersion, externalVersion) { - return errors.Errorf("local cluster ceph version is higher %q than the external cluster %q, this must never happen", externalVersion.String(), localVersion.String()) - } - - // External cluster was updated to a minor version higher, consider updating too! - if localVersion.Major == externalVersion.Major { - if IsSuperior(externalVersion, localVersion) { - logger.Warningf("external cluster ceph version is a minor version higher %q than the local cluster %q, consider upgrading", externalVersion.String(), localVersion.String()) - return nil - } - } - - // The external cluster was upgraded, consider upgrading too! - if localVersion.Major < externalVersion.Major { - logger.Errorf("external cluster ceph version is a major version higher %q than the local cluster %q, consider upgrading", externalVersion.String(), localVersion.String()) - return nil - } - - return nil -} diff --git a/pkg/operator/ceph/version/version_test.go b/pkg/operator/ceph/version/version_test.go deleted file mode 100644 index a97b2eb76..000000000 --- a/pkg/operator/ceph/version/version_test.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package version - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestToString(t *testing.T) { - assert.Equal(t, "14.0.0-0 nautilus", Nautilus.String()) - assert.Equal(t, "15.0.0-0 octopus", Octopus.String()) - received := CephVersion{-1, 0, 0, 0, ""} - - expected := fmt.Sprintf("-1.0.0-0 %s", unknownVersionString) - assert.Equal(t, expected, received.String()) -} - -func TestCephVersionFormatted(t *testing.T) { - assert.Equal(t, "ceph version 14.0.0-0 nautilus", Nautilus.CephVersionFormatted()) - assert.Equal(t, "ceph version 15.0.0-0 octopus", Octopus.CephVersionFormatted()) -} - -func TestReleaseName(t *testing.T) { - assert.Equal(t, "nautilus", Nautilus.ReleaseName()) - assert.Equal(t, "octopus", Octopus.ReleaseName()) - ver := CephVersion{-1, 0, 0, 0, ""} - assert.Equal(t, unknownVersionString, ver.ReleaseName()) -} - -func extractVersionHelper(t *testing.T, text string, major, minor, extra, build int, commitID string) { - v, err := ExtractCephVersion(text) - if assert.NoError(t, err) { - assert.Equal(t, *v, CephVersion{major, minor, extra, build, commitID}) - } -} - -func TestExtractVersion(t *testing.T) { - // release build - v0c := "ceph version 16.2.6 (ae699615bac534ea496ee965ac6192cb7e0e07c1) pacific (stable)" - v0d := ` -root@7a97f5a78bc6:/# ceph --version -ceph version 16.2.6 (ae699615bac534ea496ee965ac6192cb7e0e07c1) pacific (stable) -` - extractVersionHelper(t, v0c, 16, 2, 6, 0, "ae699615bac534ea496ee965ac6192cb7e0e07c1") - extractVersionHelper(t, v0d, 16, 2, 6, 0, "ae699615bac534ea496ee965ac6192cb7e0e07c1") - - // development build - v1c := "ceph version 16.1.33-403-g7ba6bece41 (7ba6bece4187eda5d05a9b84211fe6ba8dd287bd) pacific (rc)" - v1d := ` -bin/ceph --version -*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH *** -ceph version 16.1.33-403-g7ba6bece41 -(7ba6bece4187eda5d05a9b84211fe6ba8dd287bd) nautilus (rc) -` - extractVersionHelper(t, v1c, 16, 1, 33, 403, "7ba6bece4187eda5d05a9b84211fe6ba8dd287bd") - extractVersionHelper(t, v1d, 16, 1, 33, 403, "7ba6bece4187eda5d05a9b84211fe6ba8dd287bd") - - // build without git version info. it is possible to build the ceph tree - // without a version number, but none of the container builds do this. - // it is arguable that this should be a requirement since we are - // explicitly adding fine-grained versioning to avoid issues with - // release granularity. adding the reverse name-to-version is easy - // enough if this ever becomes a need. 
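// A short standalone illustration of what ExtractCephVersion pulls out of the
// `ceph --version` output exercised in this test: major/minor/extra from the version
// digits, the build number from a -NNN suffix when one is present, and the commit
// hash from the parenthesized field.
package main

import (
	"fmt"

	cephver "github.com/rook/rook/pkg/operator/ceph/version"
)

func main() {
	v, err := cephver.ExtractCephVersion(
		"ceph version 14.2.11-139 (5c0dc966af809fd1d429ec7bac48962a746af243) nautilus (stable)")
	if err != nil {
		panic(err)
	}
	fmt.Println(v)          // 14.2.11-139 nautilus
	fmt.Println(v.CommitID) // 5c0dc966af809fd1d429ec7bac48962a746af243
}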
- v2c := "ceph version Development (no_version) pacific (rc)" - v2d := ` -bin/ceph --version -*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH *** -ceph version Development (no_version) nautilus (rc) -` - v, err := ExtractCephVersion(v2c) - assert.Error(t, err) - assert.Nil(t, v) - - v, err = ExtractCephVersion(v2d) - assert.Error(t, err) - assert.Nil(t, v) - - // Test the round trip for serializing and deserializing the version - v3c := "ceph version 16.2.5-1 pacific" - v, err = ExtractCephVersion(v3c) - assert.NoError(t, err) - assert.NotNil(t, v) - assert.Equal(t, "16.2.5-1 pacific", v.String()) -} - -func TestSupported(t *testing.T) { - for _, v := range supportedVersions { - assert.True(t, v.Supported()) - } -} - -func TestIsRelease(t *testing.T) { - assert.True(t, Nautilus.isRelease(Nautilus)) - assert.True(t, Octopus.isRelease(Octopus)) - assert.True(t, Pacific.isRelease(Pacific)) - assert.True(t, Quincy.isRelease(Quincy)) - - assert.False(t, Octopus.isRelease(Nautilus)) - - OctopusUpdate := Octopus - OctopusUpdate.Minor = 33 - OctopusUpdate.Extra = 4 - assert.True(t, OctopusUpdate.isRelease(Octopus)) - - NautilusUpdate := Nautilus - NautilusUpdate.Minor = 33 - NautilusUpdate.Extra = 4 - assert.True(t, NautilusUpdate.isRelease(Nautilus)) -} - -func TestIsReleaseX(t *testing.T) { - assert.True(t, Nautilus.IsNautilus()) - assert.False(t, Octopus.IsNautilus()) -} - -func TestVersionAtLeast(t *testing.T) { - assert.True(t, Nautilus.IsAtLeast(Nautilus)) - assert.False(t, Nautilus.IsAtLeast(Octopus)) - assert.True(t, Octopus.IsAtLeast(Nautilus)) - assert.True(t, Octopus.IsAtLeast(Octopus)) - - assert.True(t, (&CephVersion{1, 0, 0, 0, ""}).IsAtLeast(CephVersion{0, 0, 0, 0, ""})) - assert.False(t, (&CephVersion{0, 0, 0, 0, ""}).IsAtLeast(CephVersion{1, 0, 0, 0, ""})) - assert.True(t, (&CephVersion{1, 1, 0, 0, ""}).IsAtLeast(CephVersion{1, 0, 0, 0, ""})) - assert.False(t, (&CephVersion{1, 0, 0, 0, ""}).IsAtLeast(CephVersion{1, 1, 0, 0, ""})) - assert.True(t, (&CephVersion{1, 1, 1, 0, ""}).IsAtLeast(CephVersion{1, 1, 0, 0, ""})) - assert.False(t, (&CephVersion{1, 1, 0, 0, ""}).IsAtLeast(CephVersion{1, 1, 1, 0, ""})) - assert.True(t, (&CephVersion{1, 1, 1, 0, ""}).IsAtLeast(CephVersion{1, 1, 1, 0, ""})) -} - -func TestVersionAtLeastX(t *testing.T) { - assert.True(t, Octopus.IsAtLeastOctopus()) - assert.True(t, Octopus.IsAtLeastNautilus()) - assert.True(t, Nautilus.IsAtLeastNautilus()) - assert.True(t, Pacific.IsAtLeastPacific()) - assert.False(t, Nautilus.IsAtLeastOctopus()) - assert.False(t, Nautilus.IsAtLeastPacific()) -} - -func TestIsIdentical(t *testing.T) { - assert.True(t, IsIdentical(CephVersion{14, 2, 2, 0, ""}, CephVersion{14, 2, 2, 0, ""})) - assert.False(t, IsIdentical(CephVersion{14, 2, 2, 0, ""}, CephVersion{15, 2, 2, 0, ""})) -} - -func TestIsSuperior(t *testing.T) { - assert.False(t, IsSuperior(CephVersion{14, 2, 2, 0, ""}, CephVersion{14, 2, 2, 0, ""})) - assert.False(t, IsSuperior(CephVersion{14, 2, 2, 0, ""}, CephVersion{15, 2, 2, 0, ""})) - assert.True(t, IsSuperior(CephVersion{15, 2, 2, 0, ""}, CephVersion{14, 2, 2, 0, ""})) - assert.True(t, IsSuperior(CephVersion{15, 2, 2, 0, ""}, CephVersion{15, 1, 3, 0, ""})) - assert.True(t, IsSuperior(CephVersion{15, 2, 2, 0, ""}, CephVersion{15, 2, 1, 0, ""})) - assert.True(t, IsSuperior(CephVersion{15, 2, 2, 1, ""}, CephVersion{15, 2, 1, 0, ""})) -} - -func TestIsInferior(t *testing.T) { - assert.False(t, IsInferior(CephVersion{14, 2, 2, 0, ""}, CephVersion{14, 2, 2, 0, ""})) - assert.False(t, 
IsInferior(CephVersion{15, 2, 2, 0, ""}, CephVersion{14, 2, 2, 0, ""})) - assert.True(t, IsInferior(CephVersion{14, 2, 2, 0, ""}, CephVersion{15, 2, 2, 0, ""})) - assert.True(t, IsInferior(CephVersion{15, 1, 3, 0, ""}, CephVersion{15, 2, 2, 0, ""})) - assert.True(t, IsInferior(CephVersion{15, 2, 1, 0, ""}, CephVersion{15, 2, 2, 0, ""})) - assert.True(t, IsInferior(CephVersion{15, 2, 1, 0, ""}, CephVersion{15, 2, 2, 1, ""})) -} - -func TestValidateCephVersionsBetweenLocalAndExternalClusters(t *testing.T) { - // TEST 1: versions are identical - localCephVersion := CephVersion{Major: 14, Minor: 2, Extra: 1} - externalCephVersion := CephVersion{Major: 14, Minor: 2, Extra: 1} - err := ValidateCephVersionsBetweenLocalAndExternalClusters(localCephVersion, externalCephVersion) - assert.NoError(t, err) - - // TEST 2: local cluster version major is lower than external cluster version - localCephVersion = CephVersion{Major: 14, Minor: 2, Extra: 1} - externalCephVersion = CephVersion{Major: 15, Minor: 2, Extra: 1} - err = ValidateCephVersionsBetweenLocalAndExternalClusters(localCephVersion, externalCephVersion) - assert.NoError(t, err) - - // TEST 3: local cluster version major is higher than external cluster version - localCephVersion = CephVersion{Major: 15, Minor: 2, Extra: 1} - externalCephVersion = CephVersion{Major: 14, Minor: 2, Extra: 1} - err = ValidateCephVersionsBetweenLocalAndExternalClusters(localCephVersion, externalCephVersion) - assert.Error(t, err) - - // TEST 4: local version is > but from a minor release - // local version must never be higher - localCephVersion = CephVersion{Major: 14, Minor: 2, Extra: 2} - externalCephVersion = CephVersion{Major: 14, Minor: 2, Extra: 1} - err = ValidateCephVersionsBetweenLocalAndExternalClusters(localCephVersion, externalCephVersion) - assert.Error(t, err) - - // TEST 5: external version is > but from a minor release - localCephVersion = CephVersion{Major: 14, Minor: 2, Extra: 1} - externalCephVersion = CephVersion{Major: 14, Minor: 2, Extra: 2} - err = ValidateCephVersionsBetweenLocalAndExternalClusters(localCephVersion, externalCephVersion) - assert.NoError(t, err) -} - -func TestCephVersion_Unsupported(t *testing.T) { - type fields struct { - Major int - Minor int - Extra int - Build int - } - tests := []struct { - name string - fields fields - want bool - }{ - {"supported", fields{Major: 14, Minor: 2, Extra: 1, Build: 0}, false}, - {"supported", fields{Major: 14, Minor: 2, Extra: 12, Build: 0}, false}, - {"supported", fields{Major: 15, Minor: 2, Extra: 1, Build: 0}, false}, - {"supported", fields{Major: 15, Minor: 2, Extra: 6, Build: 0}, false}, - {"unsupported", fields{Major: 14, Minor: 2, Extra: 13, Build: 0}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - v := &CephVersion{ - Major: tt.fields.Major, - Minor: tt.fields.Minor, - Extra: tt.fields.Extra, - Build: tt.fields.Build, - } - if got := v.Unsupported(); got != tt.want { - t.Errorf("CephVersion.Unsupported() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/operator/ceph/webhook.go b/pkg/operator/ceph/webhook.go deleted file mode 100644 index 89f1b54fc..000000000 --- a/pkg/operator/ceph/webhook.go +++ /dev/null @@ -1,260 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operator - -import ( - "context" - "os" - - "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/pkg/operator/ceph/csi" - "github.com/rook/rook/pkg/operator/k8sutil" - v1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" -) - -const ( - appName = "rook-ceph-admission-controller" - secretVolumeName = "webhook-certificates" // #nosec G101 This is just a var name, not a real secret - serviceAccountName = "rook-ceph-admission-controller" - portName = "webhook-api" - servicePort int32 = 443 - serverPort int32 = 8079 - tlsDir = "/etc/webhook" - admissionControllerTolerationsEnv = "ADMISSION_CONTROLLER_TOLERATIONS" - admissionControllerNodeAffinityEnv = "ADMISSION_CONTROLLER_NODE_AFFINITY" -) - -var ( - namespace = os.Getenv(k8sutil.PodNamespaceEnvVar) -) - -func isSecretPresent(ctx context.Context, context *clusterd.Context) (bool, error) { - logger.Infof("looking for secret %q", appName) - _, err := context.Clientset.CoreV1().Secrets(namespace).Get(ctx, appName, metav1.GetOptions{}) - if err != nil { - // If secret is not found. All good ! Proceed with rook without admission controllers - if apierrors.IsNotFound(err) { - logger.Infof("secret %q not found. 
proceeding without the admission controller", appName) - return false, nil - } - return false, err - } - return true, nil -} - -func createWebhookService(context *clusterd.Context) error { - webhookService := corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: namespace, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: servicePort, - TargetPort: intstr.IntOrString{ - IntVal: serverPort, - }, - }, - }, - - Selector: map[string]string{ - k8sutil.AppAttr: appName, - }, - }, - } - - _, err := k8sutil.CreateOrUpdateService(context.Clientset, namespace, &webhookService) - if err != nil && !apierrors.IsAlreadyExists(err) { - return err - } - return nil -} - -// StartControllerIfSecretPresent will initialize the webhook if secret is detected -func StartControllerIfSecretPresent(ctx context.Context, context *clusterd.Context, admissionImage string) error { - isPresent, err := isSecretPresent(ctx, context) - if err != nil { - return errors.Wrap(err, "failed to retrieve secret") - } - if isPresent { - err = initWebhook(ctx, context, admissionImage) - if err != nil { - return errors.Wrap(err, "failed to initialize webhook") - } - } - return nil -} - -func initWebhook(ctx context.Context, context *clusterd.Context, admissionImage string) error { - // At this point volume should be mounted, so proceed with creating the service and validatingwebhookconfig - err := createWebhookService(context) - if err != nil { - return errors.Wrap(err, "failed to create service") - } - err = createWebhookDeployment(ctx, context, admissionImage) - if err != nil { - return errors.Wrap(err, "failed to create deployment") - } - return nil -} - -func createWebhookDeployment(ctx context.Context, context *clusterd.Context, admissionImage string) error { - logger.Info("creating admission controller pods") - admission_parameters := []string{"ceph", - "admission-controller"} - secretVolume := getSecretVolume() - secretVolumeMount := getSecretVolumeMount() - - antiAffinity := csi.GetPodAntiAffinity(k8sutil.AppAttr, appName) - admissionControllerDeployment := getDeployment(ctx, context, secretVolume, antiAffinity, admissionImage, admission_parameters, secretVolumeMount) - - _, err := k8sutil.CreateOrUpdateDeployment(context.Clientset, &admissionControllerDeployment) - if err != nil { - return errors.Wrap(err, "failed to create admission-controller deployment") - } - - return nil -} - -func getDeployment(ctx context.Context, context *clusterd.Context, secretVolume corev1.Volume, antiAffinity corev1.PodAntiAffinity, - admissionImage string, admission_parameters []string, secretVolumeMount corev1.VolumeMount) v1.Deployment { - var replicas int32 = 2 - nodes, err := context.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err == nil { - if len(nodes.Items) == 1 { - replicas = 1 - } - } else { - logger.Errorf("failed to get nodes. Defaulting the number of replicas of admission controller pods to 2. 
%v", err) - } - - admissionControllerDeployment := v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: namespace, - }, - Spec: v1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - k8sutil.AppAttr: appName, - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: namespace, - Labels: map[string]string{ - k8sutil.AppAttr: appName, - }, - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - secretVolume, - }, - Containers: []corev1.Container{ - { - Name: appName, - Image: admissionImage, - Args: admission_parameters, - Ports: []corev1.ContainerPort{ - { - Name: portName, - ContainerPort: serverPort, - }, - }, - VolumeMounts: []corev1.VolumeMount{ - secretVolumeMount, - }, - }, - }, - ServiceAccountName: serviceAccountName, - Affinity: &corev1.Affinity{ - PodAntiAffinity: &antiAffinity, - NodeAffinity: getNodeAffinity(context.Clientset), - }, - Tolerations: getTolerations(context.Clientset), - }, - }, - }, - } - return admissionControllerDeployment -} - -func getSecretVolumeMount() corev1.VolumeMount { - secretVolumeMount := corev1.VolumeMount{ - Name: secretVolumeName, - ReadOnly: true, - MountPath: tlsDir, - } - return secretVolumeMount -} - -func getSecretVolume() corev1.Volume { - secretVolume := corev1.Volume{ - Name: secretVolumeName, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: appName, - }, - }, - } - return secretVolume -} - -func getTolerations(clientset kubernetes.Interface) []corev1.Toleration { - // Add toleration if any - tolerations := []corev1.Toleration{} - tolerationsRaw, err := k8sutil.GetOperatorSetting(clientset, controller.OperatorSettingConfigMapName, admissionControllerTolerationsEnv, "") - if err != nil { - logger.Warningf("toleration will be empty because failed to read the setting. %v", err) - return tolerations - } - tolerations, err = k8sutil.YamlToTolerations(tolerationsRaw) - if err != nil { - logger.Warningf("toleration will be empty because failed to parse the setting %q. %v", tolerationsRaw, err) - return tolerations - } - return tolerations -} - -func getNodeAffinity(clientset kubernetes.Interface) *corev1.NodeAffinity { - // Add NodeAffinity if any - v1NodeAffinity := &corev1.NodeAffinity{} - nodeAffinity, err := k8sutil.GetOperatorSetting(clientset, controller.OperatorSettingConfigMapName, admissionControllerNodeAffinityEnv, "") - if err != nil { - // nodeAffinity will be empty by default in case of error - logger.Warningf("node affinity will be empty because failed to read the setting. %v", err) - return v1NodeAffinity - } - if nodeAffinity != "" { - v1NodeAffinity, err = k8sutil.GenerateNodeAffinity(nodeAffinity) - if err != nil { - logger.Warningf("node affinity will be empty because failed to parse the setting %q. %v", nodeAffinity, err) - return v1NodeAffinity - } - } - return v1NodeAffinity -} diff --git a/pkg/operator/ceph/webhook_test.go b/pkg/operator/ceph/webhook_test.go deleted file mode 100644 index 0b186ebf4..000000000 --- a/pkg/operator/ceph/webhook_test.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operator - -import ( - "context" - "os" - "testing" - - "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" -) - -const ( - testNamespace = "test-namespace" -) - -func TestGetTolerations(t *testing.T) { - ctx := context.TODO() - clientset := fake.NewSimpleClientset() - os.Setenv("POD_NAMESPACE", testNamespace) - - // No setting results in the default value. - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: controller.OperatorSettingConfigMapName, - Namespace: testNamespace, - }, - Data: map[string]string{}, - } - _, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - tolerations := getTolerations(clientset) - expected := []v1.Toleration{} - assert.Equal(t, expected, tolerations) - - // The invalid setting results in the default value. - cm.Data = map[string]string{ - admissionControllerTolerationsEnv: "", - } - _, err = clientset.CoreV1().ConfigMaps(testNamespace).Update(ctx, cm, metav1.UpdateOptions{}) - assert.NoError(t, err) - tolerations = getTolerations(clientset) - assert.Equal(t, expected, tolerations) - - // Correct setting result in the desired value. - cm.Data = map[string]string{ - admissionControllerTolerationsEnv: ` -- effect: NoSchedule - key: node-role.kubernetes.io/controlplane - operator: Exists`, - } - _, err = clientset.CoreV1().ConfigMaps(testNamespace).Update(ctx, cm, metav1.UpdateOptions{}) - assert.NoError(t, err) - tolerations = getTolerations(clientset) - expected = []v1.Toleration{ - { - Effect: "NoSchedule", - Key: "node-role.kubernetes.io/controlplane", - Operator: v1.TolerationOpExists, - }, - } - assert.Equal(t, expected, tolerations) -} - -func TestGetNodeAffinity(t *testing.T) { - ctx := context.TODO() - clientset := fake.NewSimpleClientset() - os.Setenv("POD_NAMESPACE", testNamespace) - - // No setting results in the default value. - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: controller.OperatorSettingConfigMapName, - Namespace: testNamespace, - }, - Data: map[string]string{}, - } - _, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, cm, metav1.CreateOptions{}) - assert.NoError(t, err) - nodeAffinity := getNodeAffinity(clientset) - expected := &v1.NodeAffinity{} - assert.Equal(t, expected, nodeAffinity) - - // The invalid setting results in the default value. - cm.Data = map[string]string{ - admissionControllerNodeAffinityEnv: "", - } - _, err = clientset.CoreV1().ConfigMaps(testNamespace).Update(ctx, cm, metav1.UpdateOptions{}) - assert.NoError(t, err) - nodeAffinity = getNodeAffinity(clientset) - assert.Equal(t, expected, nodeAffinity) - - // Correct setting result in the desired value. 
- cm.Data = map[string]string{ - admissionControllerNodeAffinityEnv: "role=storage-node", - } - _, err = clientset.CoreV1().ConfigMaps(testNamespace).Update(ctx, cm, metav1.UpdateOptions{}) - assert.NoError(t, err) - nodeAffinity = getNodeAffinity(clientset) - expected = &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "role", - Operator: v1.NodeSelectorOpIn, - Values: []string{"storage-node"}, - }, - }, - }, - }, - }, - } - assert.Equal(t, expected, nodeAffinity) -} diff --git a/pkg/operator/discover/discover.go b/pkg/operator/discover/discover.go deleted file mode 100644 index e46224f52..000000000 --- a/pkg/operator/discover/discover.go +++ /dev/null @@ -1,483 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package discover to discover devices on storage nodes. -package discover - -import ( - "context" - "encoding/json" - "fmt" - "os" - "regexp" - "strings" - "time" - - "github.com/coreos/pkg/capnslog" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - discoverDaemon "github.com/rook/rook/pkg/daemon/discover" - k8sutil "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/sys" - - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -const ( - discoverDaemonsetName = "rook-discover" - discoverDaemonsetPriorityClassNameEnv = "DISCOVER_PRIORITY_CLASS_NAME" - discoverDaemonsetTolerationEnv = "DISCOVER_TOLERATION" - discoverDaemonsetTolerationKeyEnv = "DISCOVER_TOLERATION_KEY" - discoverDaemonsetTolerationsEnv = "DISCOVER_TOLERATIONS" - discoverDaemonSetNodeAffinityEnv = "DISCOVER_AGENT_NODE_AFFINITY" - discoverDaemonSetPodLabelsEnv = "DISCOVER_AGENT_POD_LABELS" - deviceInUseCMName = "local-device-in-use-cluster-%s-node-%s" - deviceInUseAppName = "rook-claimed-devices" - deviceInUseClusterAttr = "rook.io/cluster" - discoverIntervalEnv = "ROOK_DISCOVER_DEVICES_INTERVAL" - defaultDiscoverInterval = "60m" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-discover") - -// Discover reference to be deployed -type Discover struct { - clientset kubernetes.Interface -} - -// New creates an instance of Discover -func New(clientset kubernetes.Interface) *Discover { - return &Discover{ - clientset: clientset, - } -} - -// Start the discover -func (d *Discover) Start(namespace, discoverImage, securityAccount string, useCephVolume bool) error { - - err := d.createDiscoverDaemonSet(namespace, discoverImage, securityAccount, useCephVolume) - if err != nil { - return fmt.Errorf("Error starting discover daemonset: %v", err) - } - return nil -} - -func (d *Discover) createDiscoverDaemonSet(namespace, discoverImage, securityAccount string, useCephVolume bool) error { - ctx := context.TODO() - 
privileged := true - discovery_parameters := []string{"discover", - "--discover-interval", getEnvVar(discoverIntervalEnv, defaultDiscoverInterval)} - if useCephVolume { - discovery_parameters = append(discovery_parameters, "--use-ceph-volume") - } - - ds := &apps.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: discoverDaemonsetName, - Labels: map[string]string{ - "app": discoverDaemonsetName, - }, - }, - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": discoverDaemonsetName, - }, - }, - UpdateStrategy: apps.DaemonSetUpdateStrategy{ - Type: apps.RollingUpdateDaemonSetStrategyType, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": discoverDaemonsetName, - }, - }, - Spec: v1.PodSpec{ - ServiceAccountName: securityAccount, - Containers: []v1.Container{ - { - Name: discoverDaemonsetName, - Image: discoverImage, - Args: discovery_parameters, - SecurityContext: &v1.SecurityContext{ - Privileged: &privileged, - }, - VolumeMounts: []v1.VolumeMount{ - { - Name: "dev", - MountPath: "/dev", - // discovery pod could fail to start if /dev is mounted ro - ReadOnly: false, - }, - { - Name: "sys", - MountPath: "/sys", - ReadOnly: true, - }, - { - Name: "udev", - MountPath: "/run/udev", - ReadOnly: true, - }, - }, - Env: []v1.EnvVar{ - k8sutil.NamespaceEnvVar(), - k8sutil.NodeEnvVar(), - k8sutil.NameEnvVar(), - }, - }, - }, - Volumes: []v1.Volume{ - { - Name: "dev", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: "/dev", - }, - }, - }, - { - Name: "sys", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: "/sys", - }, - }, - }, - { - Name: "udev", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: "/run/udev", - }, - }, - }, - }, - HostNetwork: false, - PriorityClassName: os.Getenv(discoverDaemonsetPriorityClassNameEnv), - }, - }, - }, - } - // Get the operator pod details to attach the owner reference to the discover daemon set - operatorPod, err := k8sutil.GetRunningPod(d.clientset) - if err != nil { - logger.Errorf("failed to get operator pod. %+v", err) - } else { - k8sutil.SetOwnerRefsWithoutBlockOwner(&ds.ObjectMeta, operatorPod.OwnerReferences) - } - - // Add toleration if any - tolerationValue := os.Getenv(discoverDaemonsetTolerationEnv) - if tolerationValue != "" { - ds.Spec.Template.Spec.Tolerations = []v1.Toleration{ - { - Effect: v1.TaintEffect(tolerationValue), - Operator: v1.TolerationOpExists, - Key: os.Getenv(discoverDaemonsetTolerationKeyEnv), - }, - } - } - - tolerationsRaw := os.Getenv(discoverDaemonsetTolerationsEnv) - tolerations, err := k8sutil.YamlToTolerations(tolerationsRaw) - if err != nil { - logger.Warningf("failed to parse %s. %+v", tolerationsRaw, err) - } - ds.Spec.Template.Spec.Tolerations = append(ds.Spec.Template.Spec.Tolerations, tolerations...) - - // Add NodeAffinity if any - nodeAffinity := os.Getenv(discoverDaemonSetNodeAffinityEnv) - if nodeAffinity != "" { - v1NodeAffinity, err := k8sutil.GenerateNodeAffinity(nodeAffinity) - if err != nil { - logger.Errorf("failed to create NodeAffinity. 
%+v", err) - } else { - ds.Spec.Template.Spec.Affinity = &v1.Affinity{ - NodeAffinity: v1NodeAffinity, - } - } - } - - podLabels := os.Getenv(discoverDaemonSetPodLabelsEnv) - if podLabels != "" { - podLabels := k8sutil.ParseStringToLabels(podLabels) - // Override / Set the app label even if set by the user as - // otherwise the DaemonSet pod selector may be broken - podLabels["app"] = discoverDaemonsetName - ds.Spec.Template.ObjectMeta.Labels = podLabels - } - - _, err = d.clientset.AppsV1().DaemonSets(namespace).Create(ctx, ds, metav1.CreateOptions{}) - if err != nil { - if !k8serrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to create rook-discover daemon set. %+v", err) - } - logger.Infof("rook-discover daemonset already exists, updating ...") - _, err = d.clientset.AppsV1().DaemonSets(namespace).Update(ctx, ds, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("failed to update rook-discover daemon set. %+v", err) - } - } else { - logger.Infof("rook-discover daemonset started") - } - return nil - -} - -func getEnvVar(varName string, defaultValue string) string { - envValue := os.Getenv(varName) - if envValue != "" { - return envValue - } - return defaultValue -} - -// ListDevices lists all devices discovered on all nodes or specific node if node name is provided. -func ListDevices(clusterdContext *clusterd.Context, namespace, nodeName string) (map[string][]sys.LocalDisk, error) { - ctx := context.TODO() - // convert the host name label to the k8s node name to look up the configmap with the devices - if len(nodeName) > 0 { - var err error - nodeName, err = k8sutil.GetNodeNameFromHostname(clusterdContext.Clientset, nodeName) - if err != nil { - logger.Warningf("failed to get node name from hostname. %+v", err) - } - } - - var devices map[string][]sys.LocalDisk - listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", k8sutil.AppAttr, discoverDaemon.AppName)} - // wait for device discovery configmaps - retryCount := 0 - retryMax := 30 - sleepTime := 5 - for { - retryCount++ - if retryCount > retryMax { - return devices, fmt.Errorf("exceeded max retry count waiting for device configmap to appear") - } - - if retryCount > 1 { - // only sleep after the first time - <-time.After(time.Duration(sleepTime) * time.Second) - } - - cms, err := clusterdContext.Clientset.CoreV1().ConfigMaps(namespace).List(ctx, listOpts) - if err != nil { - logger.Warningf("failed to list device configmaps: %v", err) - return devices, fmt.Errorf("failed to list device configmaps: %+v", err) - } - if len(cms.Items) == 0 { - logger.Infof("no configmap match, retry #%d", retryCount) - continue - } - devices = make(map[string][]sys.LocalDisk, len(cms.Items)) - for _, cm := range cms.Items { - node := cm.ObjectMeta.Labels[discoverDaemon.NodeAttr] - if len(nodeName) > 0 && node != nodeName { - continue - } - deviceJson := cm.Data[discoverDaemon.LocalDiskCMData] - logger.Debugf("node %s, device %s", node, deviceJson) - - if len(node) == 0 || len(deviceJson) == 0 { - continue - } - var d []sys.LocalDisk - err = json.Unmarshal([]byte(deviceJson), &d) - if err != nil { - logger.Warningf("failed to unmarshal %s", deviceJson) - continue - } - devices[node] = d - } - break - } - logger.Debugf("discovery found the following devices %+v", devices) - return devices, nil -} - -// ListDevicesInUse lists all devices on a node that are already used by existing clusters. 
-func ListDevicesInUse(clusterdContext *clusterd.Context, namespace, nodeName string) ([]sys.LocalDisk, error) { - ctx := context.TODO() - var devices []sys.LocalDisk - - if len(nodeName) == 0 { - return devices, fmt.Errorf("empty node name") - } - - listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", k8sutil.AppAttr, deviceInUseAppName)} - cms, err := clusterdContext.Clientset.CoreV1().ConfigMaps(namespace).List(ctx, listOpts) - if err != nil { - return devices, fmt.Errorf("failed to list device in use configmaps: %+v", err) - } - - for _, cm := range cms.Items { - node := cm.ObjectMeta.Labels[discoverDaemon.NodeAttr] - if node != nodeName { - continue - } - deviceJson := cm.Data[discoverDaemon.LocalDiskCMData] - logger.Debugf("node %s, device in use %s", node, deviceJson) - - if len(node) == 0 || len(deviceJson) == 0 { - continue - } - var d []sys.LocalDisk - err = json.Unmarshal([]byte(deviceJson), &d) - if err != nil { - logger.Warningf("failed to unmarshal %s", deviceJson) - continue - } - for i := range d { - devices = append(devices, d[i]) - } - } - logger.Debugf("devices in use %+v", devices) - return devices, nil -} - -func matchDeviceFullPath(devLinks, fullpath string) bool { - dlsArr := strings.Split(devLinks, " ") - for i := range dlsArr { - if dlsArr[i] == fullpath { - return true - } - } - return false -} - -// GetAvailableDevices conducts outer join using input filters with free devices that a node has. It marks the devices from join result as in-use. -func GetAvailableDevices(clusterdContext *clusterd.Context, nodeName, clusterName string, devices []cephv1.Device, filter string, useAllDevices bool) ([]cephv1.Device, error) { - ctx := context.TODO() - results := []cephv1.Device{} - if len(devices) == 0 && len(filter) == 0 && !useAllDevices { - return results, nil - } - namespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - // find all devices - allDevices, err := ListDevices(clusterdContext, namespace, nodeName) - if err != nil { - return results, err - } - // find those on the node - nodeAllDevices, ok := allDevices[nodeName] - if !ok { - return results, fmt.Errorf("node %s has no devices", nodeName) - } - // find those in use on the node - devicesInUse, err := ListDevicesInUse(clusterdContext, namespace, nodeName) - if err != nil { - return results, err - } - - nodeDevices := []sys.LocalDisk{} - for _, nodeDevice := range nodeAllDevices { - // TODO: Filter out devices that are in use by another cluster. - // We need to retain the devices in use for this cluster so the provisioner will continue to configure the same OSDs. 
- for _, device := range devicesInUse { - if nodeDevice.Name == device.Name { - break - } - } - nodeDevices = append(nodeDevices, nodeDevice) - } - claimedDevices := []sys.LocalDisk{} - // now those left are free to use - if len(devices) > 0 { - for i := range devices { - for j := range nodeDevices { - if devices[i].FullPath != "" && matchDeviceFullPath(nodeDevices[j].DevLinks, devices[i].FullPath) { - if devices[i].Name == "" { - devices[i].Name = nodeDevices[j].Name - } - results = append(results, devices[i]) - claimedDevices = append(claimedDevices, nodeDevices[j]) - } else if devices[i].Name == nodeDevices[j].Name { - results = append(results, devices[i]) - claimedDevices = append(claimedDevices, nodeDevices[j]) - } - } - } - } else if len(filter) >= 0 { - for i := range nodeDevices { - //TODO support filter based on other keys - matched, err := regexp.Match(filter, []byte(nodeDevices[i].Name)) - if err == nil && matched { - d := cephv1.Device{ - Name: nodeDevices[i].Name, - } - claimedDevices = append(claimedDevices, nodeDevices[i]) - results = append(results, d) - } - } - } else if useAllDevices { - for i := range nodeDevices { - d := cephv1.Device{ - Name: nodeDevices[i].Name, - } - results = append(results, d) - claimedDevices = append(claimedDevices, nodeDevices[i]) - } - } - // mark these devices in use - if len(claimedDevices) > 0 { - deviceJson, err := json.Marshal(claimedDevices) - if err != nil { - logger.Infof("failed to marshal: %v", err) - return results, err - } - data := make(map[string]string, 1) - data[discoverDaemon.LocalDiskCMData] = string(deviceJson) - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: k8sutil.TruncateNodeName(fmt.Sprintf(deviceInUseCMName, clusterName, "%s"), nodeName), - Namespace: namespace, - Labels: map[string]string{ - k8sutil.AppAttr: deviceInUseAppName, - discoverDaemon.NodeAttr: nodeName, - deviceInUseClusterAttr: clusterName, - }, - }, - Data: data, - } - _, err = clusterdContext.Clientset.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) - if err != nil { - if !k8serrors.IsAlreadyExists(err) { - return results, fmt.Errorf("failed to update device in use for cluster %s node %s: %v", clusterName, nodeName, err) - } - if _, err := clusterdContext.Clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}); err != nil { - return results, fmt.Errorf("failed to update devices in use. %+v", err) - } - } - } - return results, nil -} - -// Stop the discover -func (d *Discover) Stop(ctx context.Context, namespace string) error { - err := d.clientset.AppsV1().DaemonSets(namespace).Delete(ctx, discoverDaemonsetName, metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return err - } - return nil -} diff --git a/pkg/operator/discover/discover_test.go b/pkg/operator/discover/discover_test.go deleted file mode 100644 index 92472548b..000000000 --- a/pkg/operator/discover/discover_test.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package discover to discover devices on storage nodes. -package discover - -import ( - "context" - "os" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/clusterd" - discoverDaemon "github.com/rook/rook/pkg/daemon/discover" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - - "github.com/stretchr/testify/assert" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestStartDiscoveryDaemonset(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.PodNameEnvVar, "rook-operator") - defer os.Unsetenv(k8sutil.PodNameEnvVar) - - os.Setenv(discoverDaemonsetPriorityClassNameEnv, "my-priority-class") - defer os.Unsetenv(discoverDaemonsetPriorityClassNameEnv) - - namespace := "ns" - a := New(clientset) - - // Create an operator pod - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-operator", - Namespace: "rook-system", - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "mypodContainer", - Image: "rook/test", - }, - }, - }, - } - _, err := clientset.CoreV1().Pods("rook-system").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - // start a basic cluster - err = a.Start(namespace, "rook/rook:myversion", "mysa", false) - assert.Nil(t, err) - - // check daemonset parameters - agentDS, err := clientset.AppsV1().DaemonSets(namespace).Get(ctx, "rook-discover", metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, namespace, agentDS.Namespace) - assert.Equal(t, "rook-discover", agentDS.Name) - assert.Equal(t, "mysa", agentDS.Spec.Template.Spec.ServiceAccountName) - assert.Equal(t, "my-priority-class", agentDS.Spec.Template.Spec.PriorityClassName) - assert.True(t, *agentDS.Spec.Template.Spec.Containers[0].SecurityContext.Privileged) - volumes := agentDS.Spec.Template.Spec.Volumes - assert.Equal(t, 3, len(volumes)) - volumeMounts := agentDS.Spec.Template.Spec.Containers[0].VolumeMounts - assert.Equal(t, 3, len(volumeMounts)) - envs := agentDS.Spec.Template.Spec.Containers[0].Env - assert.Equal(t, 3, len(envs)) - image := agentDS.Spec.Template.Spec.Containers[0].Image - assert.Equal(t, "rook/rook:myversion", image) - assert.Nil(t, agentDS.Spec.Template.Spec.Tolerations) -} - -func TestGetAvailableDevices(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 3) - pvcBackedOSD := false - ns := "rook-system" - nodeName := "node123" - os.Setenv(k8sutil.PodNamespaceEnvVar, ns) - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.PodNameEnvVar, "rook-operator") - defer os.Unsetenv(k8sutil.PodNameEnvVar) - - data := make(map[string]string, 1) - data[discoverDaemon.LocalDiskCMData] = `[{"name":"sdd","parent":"","hasChildren":false,"devLinks":"/dev/disk/by-id/scsi-36001405f826bd553d8c4dbf9f41c18be /dev/disk/by-id/wwn-0x6001405f826bd553d8c4dbf9f41c18be 
/dev/disk/by-path/ip-127.0.0.1:3260-iscsi-iqn.2016-06.world.srv:storage.target01-lun-1","size":10737418240,"uuid":"","serial":"36001405f826bd553d8c4dbf9f41c18be","type":"disk","rotational":true,"readOnly":false,"ownPartition":true,"filesystem":"","vendor":"LIO-ORG","model":"disk02","wwn":"0x6001405f826bd553","wwnVendorExtension":"0x6001405f826bd553d8c4dbf9f41c18be","empty":true},{"name":"sdb","parent":"","hasChildren":false,"devLinks":"/dev/disk/by-id/scsi-3600140577f462d9908b409d94114e042 /dev/disk/by-id/wwn-0x600140577f462d9908b409d94114e042 /dev/disk/by-path/ip-127.0.0.1:3260-iscsi-iqn.2016-06.world.srv:storage.target01-lun-3","size":5368709120,"uuid":"","serial":"3600140577f462d9908b409d94114e042","type":"disk","rotational":true,"readOnly":false,"ownPartition":false,"filesystem":"","vendor":"LIO-ORG","model":"disk04","wwn":"0x600140577f462d99","wwnVendorExtension":"0x600140577f462d9908b409d94114e042","empty":true},{"name":"sdc","parent":"","hasChildren":false,"devLinks":"/dev/disk/by-id/scsi-3600140568c0bd28d4ee43769387c9f02 /dev/disk/by-id/wwn-0x600140568c0bd28d4ee43769387c9f02 /dev/disk/by-path/ip-127.0.0.1:3260-iscsi-iqn.2016-06.world.srv:storage.target01-lun-2","size":5368709120,"uuid":"","serial":"3600140568c0bd28d4ee43769387c9f02","type":"disk","rotational":true,"readOnly":false,"ownPartition":true,"filesystem":"","vendor":"LIO-ORG","model":"disk03","wwn":"0x600140568c0bd28d","wwnVendorExtension":"0x600140568c0bd28d4ee43769387c9f02","empty":true},{"name":"sda","parent":"","hasChildren":false,"devLinks":"/dev/disk/by-id/scsi-36001405fc00c75fb4c243aa9d61987bd /dev/disk/by-id/wwn-0x6001405fc00c75fb4c243aa9d61987bd /dev/disk/by-path/ip-127.0.0.1:3260-iscsi-iqn.2016-06.world.srv:storage.target01-lun-0","size":10737418240,"uuid":"","serial":"36001405fc00c75fb4c243aa9d61987bd","type":"disk","rotational":true,"readOnly":false,"ownPartition":false,"filesystem":"","vendor":"LIO-ORG","model":"disk01","wwn":"0x6001405fc00c75fb","wwnVendorExtension":"0x6001405fc00c75fb4c243aa9d61987bd","empty":true},{"name":"nvme0n1","parent":"","hasChildren":false,"devLinks":"/dev/disk/by-id/nvme-eui.002538c5710091a7","size":512110190592,"uuid":"","serial":"","type":"disk","rotational":false,"readOnly":false,"ownPartition":false,"filesystem":"","vendor":"","model":"","wwn":"","wwnVendorExtension":"","empty":true}]` - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "local-device-" + nodeName, - Namespace: ns, - Labels: map[string]string{ - k8sutil.AppAttr: discoverDaemon.AppName, - discoverDaemon.NodeAttr: nodeName, - }, - }, - Data: data, - } - _, err := clientset.CoreV1().ConfigMaps(ns).Create(ctx, cm, metav1.CreateOptions{}) - assert.Nil(t, err) - context := &clusterd.Context{ - Clientset: clientset, - } - d := []cephv1.Device{ - { - Name: "sdc", - }, - { - Name: "foo", - }, - } - - nodeDevices, err := ListDevices(context, ns, "" /* all nodes */) - assert.Nil(t, err) - assert.Equal(t, 1, len(nodeDevices)) - - devices, err := GetAvailableDevices(context, nodeName, ns, d, "^sd.", pvcBackedOSD) - assert.Nil(t, err) - assert.Equal(t, 1, len(devices)) - // devices should be in use now, 2nd try gets the same list - devices, err = GetAvailableDevices(context, nodeName, ns, d, "^sd.", pvcBackedOSD) - assert.Nil(t, err) - assert.Equal(t, 1, len(devices)) -} diff --git a/pkg/operator/k8sutil/cmdreporter/cmdreporter.go b/pkg/operator/k8sutil/cmdreporter/cmdreporter.go index 2053bee48..74aef62ea 100644 --- a/pkg/operator/k8sutil/cmdreporter/cmdreporter.go +++ 
b/pkg/operator/k8sutil/cmdreporter/cmdreporter.go @@ -25,9 +25,9 @@ import ( "github.com/coreos/pkg/capnslog" "github.com/pkg/errors" - "github.com/rook/rook/pkg/daemon/util" + "github.com/rook/cassandra/pkg/daemon/util" - "github.com/rook/rook/pkg/operator/k8sutil" + "github.com/rook/cassandra/pkg/operator/k8sutil" batch "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,7 +50,7 @@ const ( ) var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "CmdReporter") + logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "CmdReporter") ) // CmdReporter is a wrapper for Rook's cmd-reporter commandline utility allowing operators to use diff --git a/pkg/operator/k8sutil/deployment.go b/pkg/operator/k8sutil/deployment.go index f84ed8ad0..7c9ac09c9 100644 --- a/pkg/operator/k8sutil/deployment.go +++ b/pkg/operator/k8sutil/deployment.go @@ -23,8 +23,8 @@ import ( "github.com/banzaicloud/k8s-objectmatcher/patch" "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/util" + "github.com/rook/cassandra/pkg/clusterd" + "github.com/rook/cassandra/pkg/util" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" diff --git a/pkg/operator/k8sutil/k8sutil.go b/pkg/operator/k8sutil/k8sutil.go index f3a44c76b..8f9c8b0b7 100644 --- a/pkg/operator/k8sutil/k8sutil.go +++ b/pkg/operator/k8sutil/k8sutil.go @@ -27,8 +27,8 @@ import ( "github.com/coreos/pkg/capnslog" "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" - rookversion "github.com/rook/rook/pkg/version" + "github.com/rook/cassandra/pkg/clusterd" + rookversion "github.com/rook/cassandra/pkg/version" v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,7 +38,7 @@ import ( "k8s.io/client-go/tools/cache" ) -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-k8sutil") +var logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "op-k8sutil") const ( // Namespace for rook diff --git a/pkg/operator/k8sutil/network.go b/pkg/operator/k8sutil/network.go deleted file mode 100644 index 7c7c53ccd..000000000 --- a/pkg/operator/k8sutil/network.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package k8sutil - -import ( - "encoding/json" - "fmt" - "sort" - "strings" - - netapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // publicNetworkSelectorKeyName is the network selector key for the ceph public network - publicNetworkSelectorKeyName = "public" - // clusterNetworkSelectorKeyName is the network selector key for the ceph cluster network - clusterNetworkSelectorKeyName = "cluster" -) - -// NetworkAttachmentConfig represents the configuration of the NetworkAttachmentDefinitions object -type NetworkAttachmentConfig struct { - CniVersion string `json:"cniVersion,omitempty"` - Type string `json:"type,omitempty"` - Master string `json:"master,omitempty"` - Mode string `json:"mode,omitempty"` - Ipam struct { - Type string `json:"type,omitempty"` - Subnet string `json:"subnet,omitempty"` - Addresses []struct { - Address string `json:"address,omitempty"` - Gateway string `json:"gateway,omitempty"` - } `json:"addresses,omitempty"` - Ranges [][]struct { - Subnet string `json:"subnet,omitempty"` - RangeStart string `json:"rangeStart,omitempty"` - RangeEnd string `json:"rangeEnd,omitempty"` - Gateway string `json:"gateway,omitempty"` - } `json:"ranges,omitempty"` - Range string `json:"range,omitempty"` - RangeStart string `json:"rangeStart,omitempty"` - RangeEnd string `json:"rangeEnd,omitempty"` - Routes []struct { - Dst string `json:"dst,omitempty"` - } `json:"routes,omitempty"` - Gateway string `json:"gateway,omitempty"` - } `json:"ipam,omitempty"` -} - -// ApplyMultus apply multus selector to Pods -// Multus supports short and json syntax, use only one kind at a time. -func ApplyMultus(net cephv1.NetworkSpec, objectMeta *metav1.ObjectMeta) error { - v := make([]string, 0, 2) - shortSyntax := false - jsonSyntax := false - - for k, ns := range net.Selectors { - var multusMap map[string]string - err := json.Unmarshal([]byte(ns), &multusMap) - - if err == nil { - jsonSyntax = true - } else { - shortSyntax = true - } - - var isExcluded bool - for _, clusterNetworkApps := range getClusterNetworkApps() { - isExcluded = strings.Contains(objectMeta.Labels["app"], clusterNetworkApps) - } - if isExcluded { - v = append(v, string(ns)) - } else { - if k == publicNetworkSelectorKeyName { - v = append(v, string(ns)) - } - } - } - - if shortSyntax && jsonSyntax { - return fmt.Errorf("ApplyMultus: Can't mix short and JSON form") - } - - // Sort network strings so that pods/deployments won't need updated in a loop if nothing changes - sort.Strings(v) - - networks := strings.Join(v, ", ") - if jsonSyntax { - networks = "[" + networks + "]" - } - - t := rook.Annotations{ - "k8s.v1.cni.cncf.io/networks": networks, - } - t.ApplyToObjectMeta(objectMeta) - - return nil -} - -// getClusterNetworkApps returns the list of ceph apps that utilize cluster network -func getClusterNetworkApps() []string { - return []string{"osd"} -} - -// GetNetworkAttachmentConfig returns the NetworkAttachmentDefinitions configuration -func GetNetworkAttachmentConfig(n netapi.NetworkAttachmentDefinition) (NetworkAttachmentConfig, error) { - netConfigJSON := n.Spec.Config - var netConfig NetworkAttachmentConfig - - err := json.Unmarshal([]byte(netConfigJSON), &netConfig) - if err != nil { - return netConfig, fmt.Errorf("failed to unmarshal netconfig json %q. 
%v", netConfigJSON, err) - } - - return netConfig, nil -} diff --git a/pkg/operator/k8sutil/network_test.go b/pkg/operator/k8sutil/network_test.go deleted file mode 100644 index 7ad5b0f01..000000000 --- a/pkg/operator/k8sutil/network_test.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8sutil - -import ( - "testing" - - netapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestApplyMultus(t *testing.T) { - t.Run("short format", func(t *testing.T) { - tests := []struct { - name string - netSelectors map[string]string - labels map[string]string - want string - }{ - { - name: "no applicable networks for non-osd pod", - netSelectors: map[string]string{ - "unknown": "macvlan@net1", - }, - want: "", - }, - { - name: "for a non-osd pod", - netSelectors: map[string]string{ - publicNetworkSelectorKeyName: "macvlan@net1", - clusterNetworkSelectorKeyName: "macvlan@net2", - }, - want: "macvlan@net1", - }, - { - name: "for an osd pod", - netSelectors: map[string]string{ - publicNetworkSelectorKeyName: "macvlan@net1", - clusterNetworkSelectorKeyName: "macvlan@net2", - }, - labels: map[string]string{"app": "rook-ceph-osd"}, - want: "macvlan@net1, macvlan@net2", - }, - { - name: "for an osd pod (reverse ordering)", - netSelectors: map[string]string{ - publicNetworkSelectorKeyName: "macvlan@net2", - clusterNetworkSelectorKeyName: "macvlan@net1", - }, - labels: map[string]string{"app": "rook-ceph-osd"}, - want: "macvlan@net1, macvlan@net2", // should not change the order of output - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - net := cephv1.NetworkSpec{ - Provider: "multus", - Selectors: test.netSelectors, - } - objMeta := metav1.ObjectMeta{} - objMeta.Labels = test.labels - err := ApplyMultus(net, &objMeta) - assert.NoError(t, err) - assert.Equal(t, test.want, objMeta.Annotations["k8s.v1.cni.cncf.io/networks"]) - }) - } - }) - - t.Run("JSON format", func(t *testing.T) { - json1 := `{"name": "macvlan", "interface": "net1"}` - json2 := `{"name": "macvlan", "interface": "net2"}` - - t.Run("no applicable networks for non-osd pod", func(t *testing.T) { - net := cephv1.NetworkSpec{ - Provider: "multus", - Selectors: map[string]string{ - "server": json1, - "broker": json2, - }, - } - objMeta := metav1.ObjectMeta{} - err := ApplyMultus(net, &objMeta) - assert.NoError(t, err) - // non-osd pods should not get any network annotations here - assert.Equal(t, "[]", objMeta.Annotations["k8s.v1.cni.cncf.io/networks"]) - }) - - t.Run("for a non-osd pod", func(t *testing.T) { - net := cephv1.NetworkSpec{ - Provider: "multus", - Selectors: map[string]string{ - "public": json1, - "cluster": json2, - }, - } - objMeta := metav1.ObjectMeta{} - err := ApplyMultus(net, &objMeta) - assert.NoError(t, err) - // 
non-osd pods should only get public networks - assert.Equal(t, "["+json1+"]", objMeta.Annotations["k8s.v1.cni.cncf.io/networks"]) - }) - - t.Run("for an osd pod", func(t *testing.T) { - net := cephv1.NetworkSpec{ - Provider: "multus", - Selectors: map[string]string{ - "server": json1, - "broker": json2, - }, - } - objMeta := metav1.ObjectMeta{ - Labels: map[string]string{ - "app": "rook-ceph-osd", - }, - } - err := ApplyMultus(net, &objMeta) - assert.NoError(t, err) - assert.Equal(t, "["+json1+", "+json2+"]", objMeta.Annotations["k8s.v1.cni.cncf.io/networks"]) - }) - - t.Run("for an osd pod (reverse ordering)", func(t *testing.T) { - net := cephv1.NetworkSpec{ - Provider: "multus", - Selectors: map[string]string{ - "server": json2, - "broker": json1, - }, - } - objMeta := metav1.ObjectMeta{ - Labels: map[string]string{ - "app": "rook-ceph-osd", - }, - } - err := ApplyMultus(net, &objMeta) - assert.NoError(t, err) - // should not change the order of output - assert.Equal(t, "["+json1+", "+json2+"]", objMeta.Annotations["k8s.v1.cni.cncf.io/networks"]) - }) - }) - - t.Run("mixed format (error)", func(t *testing.T) { - net := cephv1.NetworkSpec{ - Provider: "multus", - Selectors: map[string]string{ - "server": `{"name": "macvlan", "interface": "net1"}`, - "broker": `macvlan@net2`, - }, - } - - objMeta := metav1.ObjectMeta{} - err := ApplyMultus(net, &objMeta) - - assert.Error(t, err) - }) -} - -func TestGetNetworkAttachmentConfig(t *testing.T) { - dummyNetAttachDef := netapi.NetworkAttachmentDefinition{ - Spec: netapi.NetworkAttachmentDefinitionSpec{ - Config: `{ - "cniVersion": "0.3.0", - "type": "macvlan", - "master": "eth2", - "mode": "bridge", - "ipam": { - "type": "host-local", - "subnet": "172.18.8.0/24", - "rangeStart": "172.18.8.200", - "rangeEnd": "172.18.8.216", - "routes": [ - { - "dst": "0.0.0.0/0" - } - ], - "gateway": "172.18.8.1" - } - }`, - }, - } - - config, err := GetNetworkAttachmentConfig(dummyNetAttachDef) - assert.NoError(t, err) - assert.Equal(t, "172.18.8.0/24", config.Ipam.Subnet) -} diff --git a/pkg/operator/k8sutil/node.go b/pkg/operator/k8sutil/node.go deleted file mode 100644 index 284171c84..000000000 --- a/pkg/operator/k8sutil/node.go +++ /dev/null @@ -1,378 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package k8sutil for Kubernetes helpers. -package k8sutil - -import ( - "context" - "fmt" - "strings" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/client-go/kubernetes" -) - -// ValidNodeNoSched returns true if the node (1) meets Rook's placement terms, -// and (2) is ready. Unlike ValidNode, this method will ignore the -// Node.Spec.Unschedulable flag. False otherwise. 
-func ValidNodeNoSched(node v1.Node, placement cephv1.Placement) (bool, error) { - p, err := NodeMeetsPlacementTerms(node, placement, false) - if err != nil { - return false, fmt.Errorf("failed to check if node meets Rook placement terms. %+v", err) - } - if !p { - return false, nil - } - - if !NodeIsReady(node) { - return false, nil - } - - return true, nil -} - -// ValidNode returns true if the node (1) is schedulable, (2) meets Rook's placement terms, and -// (3) is ready. False otherwise. -func ValidNode(node v1.Node, placement cephv1.Placement) (bool, error) { - if !GetNodeSchedulable(node) { - return false, nil - } - - return ValidNodeNoSched(node, placement) -} - -// GetValidNodes returns all nodes that (1) are not cordoned, (2) meet Rook's placement terms, and -// (3) are ready. -func GetValidNodes(rookStorage cephv1.StorageScopeSpec, clientset kubernetes.Interface, placement cephv1.Placement) []cephv1.Node { - matchingK8sNodes, err := GetKubernetesNodesMatchingRookNodes(rookStorage.Nodes, clientset) - if err != nil { - // cannot list nodes, return empty nodes - logger.Errorf("failed to list nodes: %+v", err) - return []cephv1.Node{} - } - - validK8sNodes := []v1.Node{} - for _, n := range matchingK8sNodes { - valid, err := ValidNode(n, placement) - if err != nil { - logger.Errorf("failed to validate node %s. %+v", n.Name, err) - } else if valid { - validK8sNodes = append(validK8sNodes, n) - } - } - - return RookNodesMatchingKubernetesNodes(rookStorage, validK8sNodes) -} - -// GetNodeNameFromHostname returns the name of the node resource looked up by the hostname label -// Typically these will be the same name, but sometimes they are not such as when nodes have a longer -// dns name, but the hostname is short. -func GetNodeNameFromHostname(clientset kubernetes.Interface, hostName string) (string, error) { - ctx := context.TODO() - options := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", v1.LabelHostname, hostName)} - nodes, err := clientset.CoreV1().Nodes().List(ctx, options) - if err != nil { - return hostName, err - } - - for _, node := range nodes.Items { - return node.Name, nil - } - return hostName, fmt.Errorf("node not found") -} - -// GetNodeHostName returns the hostname label given the node name. -func GetNodeHostName(clientset kubernetes.Interface, nodeName string) (string, error) { - ctx := context.TODO() - node, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) - if err != nil { - return "", err - } - return GetNodeHostNameLabel(node) -} - -func GetNodeHostNameLabel(node *v1.Node) (string, error) { - hostname, ok := node.Labels[v1.LabelHostname] - if !ok { - return "", fmt.Errorf("hostname not found on the node") - } - return hostname, nil -} - -// GetNodeHostNames returns the name of the node resource mapped to their hostname label. -// Typically these will be the same name, but sometimes they are not such as when nodes have a longer -// dns name, but the hostname is short. 
-func GetNodeHostNames(clientset kubernetes.Interface) (map[string]string, error) { - ctx := context.TODO() - nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - nodeMap := map[string]string{} - for _, node := range nodes.Items { - nodeMap[node.Name] = node.Labels[v1.LabelHostname] - } - return nodeMap, nil -} - -// GetNodeSchedulable returns a boolean if the node is tainted as Schedulable or not -// true -> Node is schedulable -// false -> Node is unschedulable -func GetNodeSchedulable(node v1.Node) bool { - // some unit tests set this to quickly emulate an unschedulable node; if this is set to true, - // we can shortcut deeper inspection for schedulability. - return !node.Spec.Unschedulable -} - -// NodeMeetsPlacementTerms returns true if the Rook placement allows the node to have resources scheduled -// on it. A node is placeable if it (1) meets any affinity terms that may be set in the placement, -// and (2) its taints are tolerated by the placements tolerations. -// There is the option to ignore well known taints defined in WellKnownTaints. See WellKnownTaints -// for more information. -func NodeMeetsPlacementTerms(node v1.Node, placement cephv1.Placement, ignoreWellKnownTaints bool) (bool, error) { - a, err := NodeMeetsAffinityTerms(node, placement.NodeAffinity) - if err != nil { - return false, fmt.Errorf("failed to check if node %s meets affinity terms. regarding as not match. %+v", node.Name, err) - } - if !a { - return false, nil - } - if !NodeIsTolerable(node, placement.Tolerations, ignoreWellKnownTaints) { - return false, nil - } - return true, nil -} - -// NodeMeetsAffinityTerms returns true if the node meets the terms of the node affinity. -// `PreferredDuringSchedulingIgnoredDuringExecution` terms are ignored and not used to judge a -// node's usability. -func NodeMeetsAffinityTerms(node v1.Node, affinity *v1.NodeAffinity) (bool, error) { - // Terms are met automatically if relevant terms aren't set - if affinity == nil || affinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { - return true, nil - } - for _, req := range affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { - nodeSelector, err := nodeSelectorRequirementsAsSelector(req.MatchExpressions) - if err != nil { - return false, fmt.Errorf("failed to parse affinity MatchExpressions: %+v, regarding as not match. %+v", req.MatchExpressions, err) - } - if nodeSelector.Matches(labels.Set(node.Labels)) { - return true, nil - } - } - return false, nil -} - -// nodeSelectorRequirementsAsSelector method is copied from https://github.com/kubernetes/kubernetes. Since Rook uses this method and in -// Kubernetes v1.20.0 this method is not exported. - -// nodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements -// labels.Selector. 
-func nodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) { - if len(nsm) == 0 { - return labels.Nothing(), nil - } - selector := labels.NewSelector() - for _, expr := range nsm { - var op selection.Operator - switch expr.Operator { - case v1.NodeSelectorOpIn: - op = selection.In - case v1.NodeSelectorOpNotIn: - op = selection.NotIn - case v1.NodeSelectorOpExists: - op = selection.Exists - case v1.NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case v1.NodeSelectorOpGt: - op = selection.GreaterThan - case v1.NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, expr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -// NodeIsTolerable returns true if the node's taints are all tolerated by the given tolerations. -// There is the option to ignore well known taints defined in WellKnownTaints. See WellKnownTaints -// for more information. -func NodeIsTolerable(node v1.Node, tolerations []v1.Toleration, ignoreWellKnownTaints bool) bool { - for _, taint := range node.Spec.Taints { - if ignoreWellKnownTaints && TaintIsWellKnown(taint) { - continue - } - isTolerated := false - for _, toleration := range tolerations { - localtaint := taint - if toleration.ToleratesTaint(&localtaint) { - isTolerated = true - break - } - } - if !isTolerated { - return false - } - } - return true -} - -// NodeIsReady returns true if the node is ready. It returns false if the node is not ready. -func NodeIsReady(node v1.Node) bool { - for _, c := range node.Status.Conditions { - if c.Type == v1.NodeReady && c.Status == v1.ConditionTrue { - return true - } - } - return false -} - -func rookNodeMatchesKubernetesNode(rookNode cephv1.Node, kubernetesNode v1.Node) bool { - hostname := normalizeHostname(kubernetesNode) - return rookNode.Name == hostname || rookNode.Name == kubernetesNode.Name -} - -func normalizeHostname(kubernetesNode v1.Node) string { - hostname := kubernetesNode.Labels[v1.LabelHostname] - if len(hostname) == 0 { - // fall back to the node name if the hostname label is not set - hostname = kubernetesNode.Name - } - return hostname -} - -// GetKubernetesNodesMatchingRookNodes lists all the nodes in Kubernetes and returns all the -// Kubernetes nodes that have a corresponding match in the list of Rook nodes. -func GetKubernetesNodesMatchingRookNodes(rookNodes []cephv1.Node, clientset kubernetes.Interface) ([]v1.Node, error) { - ctx := context.TODO() - nodes := []v1.Node{} - k8sNodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err != nil { - return nodes, fmt.Errorf("failed to list kubernetes nodes. %+v", err) - } - for _, kn := range k8sNodes.Items { - for _, rn := range rookNodes { - if rookNodeMatchesKubernetesNode(rn, kn) { - nodes = append(nodes, kn) - } - } - } - return nodes, nil -} - -// GetNotReadyKubernetesNodes lists all the nodes that are in NotReady state -func GetNotReadyKubernetesNodes(clientset kubernetes.Interface) ([]v1.Node, error) { - ctx := context.TODO() - nodes := []v1.Node{} - k8sNodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err != nil { - return nodes, fmt.Errorf("failed to list kubernetes nodes. 
%v", err) - } - for _, node := range k8sNodes.Items { - if !NodeIsReady(node) { - nodes = append(nodes, node) - } - } - return nodes, nil -} - -// RookNodesMatchingKubernetesNodes returns only the given Rook nodes which have a corresponding -// match in the list of Kubernetes nodes. -func RookNodesMatchingKubernetesNodes(rookStorage cephv1.StorageScopeSpec, kubernetesNodes []v1.Node) []cephv1.Node { - nodes := []cephv1.Node{} - for _, kn := range kubernetesNodes { - for _, rn := range rookStorage.Nodes { - if rookNodeMatchesKubernetesNode(rn, kn) { - rn.Name = normalizeHostname(kn) - nodes = append(nodes, rn) - } - } - } - return nodes -} - -// GenerateNodeAffinity will return v1.NodeAffinity or error -func GenerateNodeAffinity(nodeAffinity string) (*v1.NodeAffinity, error) { - newNodeAffinity := &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - {}, - }, - }, - } - nodeLabels := strings.Split(nodeAffinity, ";") - // For each label in 'nodeLabels', retrieve (key,value) pair and create nodeAffinity - // '=' separates key from values - // ',' separates values - for _, nodeLabel := range nodeLabels { - // If tmpNodeLabel is an array of length > 1 - // [0] is Key and [1] is comma separated values - tmpNodeLabel := strings.Split(nodeLabel, "=") - if len(tmpNodeLabel) > 1 { - nodeLabelKey := strings.Trim(tmpNodeLabel[0], " ") - tmpNodeLabelValue := tmpNodeLabel[1] - nodeLabelValues := strings.Split(tmpNodeLabelValue, ",") - if nodeLabelKey != "" && len(nodeLabelValues) > 0 { - err := validation.IsQualifiedName(nodeLabelKey) - if err != nil { - return nil, fmt.Errorf("invalid label key: %s err: %v", nodeLabelKey, err) - } - for _, nodeLabelValue := range nodeLabelValues { - nodeLabelValue = strings.Trim(nodeLabelValue, " ") - err := validation.IsValidLabelValue(nodeLabelValue) - if err != nil { - return nil, fmt.Errorf("invalid label value: %s err: %v", nodeLabelValue, err) - } - } - matchExpression := v1.NodeSelectorRequirement{ - Key: nodeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: nodeLabelValues, - } - newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions = - append(newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions, matchExpression) - } - } else { - nodeLabelKey := strings.Trim(tmpNodeLabel[0], " ") - if nodeLabelKey != "" { - err := validation.IsQualifiedName(nodeLabelKey) - if err != nil { - return nil, fmt.Errorf("invalid label key: %s err: %v", nodeLabelKey, err) - } - matchExpression := v1.NodeSelectorRequirement{ - Key: nodeLabelKey, - Operator: v1.NodeSelectorOpExists, - } - newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions = - append(newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions, matchExpression) - } - } - } - return newNodeAffinity, nil -} diff --git a/pkg/operator/k8sutil/node_test.go b/pkg/operator/k8sutil/node_test.go deleted file mode 100644 index 9b98f6da4..000000000 --- a/pkg/operator/k8sutil/node_test.go +++ /dev/null @@ -1,423 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package k8sutil for Kubernetes helpers. -package k8sutil - -import ( - "context" - "reflect" - "testing" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - optest "github.com/rook/rook/pkg/operator/test" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" -) - -func createNode(nodeName string, condition v1.NodeConditionType, clientset *fake.Clientset) error { - ctx := context.TODO() - node := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: condition, Status: v1.ConditionTrue, - }, - }, - }, - } - _, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) - return err -} - -func TestValidNode(t *testing.T) { - nodeA := "nodeA" - nodeB := "nodeB" - - storage := cephv1.StorageScopeSpec{ - Nodes: []cephv1.Node{ - { - Name: nodeA, - }, - { - Name: nodeB, - }, - }, - } - var placement cephv1.Placement - // set up a fake k8s client set and watcher to generate events that the operator will listen to - clientset := fake.NewSimpleClientset() - - nodeErr := createNode(nodeA, v1.NodeReady, clientset) - assert.Nil(t, nodeErr) - nodeErr = createNode(nodeB, v1.NodeNetworkUnavailable, clientset) - assert.Nil(t, nodeErr) - validNodes := GetValidNodes(storage, clientset, placement) - assert.Equal(t, len(validNodes), 1) -} - -func testNode(taints []v1.Taint) v1.Node { - n := v1.Node{} - n.Spec.Taints = append(n.Spec.Taints, taints...) 
- return n -} - -func taintReservedForRook() v1.Taint { - return v1.Taint{Key: "reservedForRook", Effect: v1.TaintEffectNoSchedule} -} - -func taintReservedForOther() v1.Taint { - return v1.Taint{Key: "reservedForNOTRook", Effect: v1.TaintEffectNoSchedule} -} - -func taintAllWellKnown() []v1.Taint { - taints := []v1.Taint{} - for _, t := range WellKnownTaints { - taints = append(taints, v1.Taint{ - // assume the "worst" with NoExecute - Key: t, Effect: v1.TaintEffectNoExecute, - }) - } - return taints -} - -func taints(taints ...v1.Taint) []v1.Taint { - list := []v1.Taint{} - for _, t := range taints { - list = append(taints, t) - } - return list -} - -func tolerateRook() []v1.Toleration { - return []v1.Toleration{{Key: "reservedForRook"}} -} - -func TestNodeIsTolerable(t *testing.T) { - type args struct { - node v1.Node - tolerations []v1.Toleration - ignoreWellKnownTaints bool - } - tests := []struct { - name string - args args - want bool - }{ - {name: "tolerate node w/o taints", args: args{ - node: v1.Node{}, - tolerations: tolerateRook(), - ignoreWellKnownTaints: false, - }, want: true}, - {name: "tolerate node w/ rook taint", args: args{ - node: testNode(taints(taintReservedForRook())), - tolerations: tolerateRook(), - ignoreWellKnownTaints: false, - }, want: true}, - {name: "do not tolerate rook taint", args: args{ - node: testNode(taints(taintReservedForRook())), - tolerations: nil, - ignoreWellKnownTaints: false, - }, want: false}, - {name: "do not tolerate other taint", args: args{ - node: testNode(taints(taintReservedForRook(), taintReservedForOther())), - tolerations: tolerateRook(), - ignoreWellKnownTaints: false, - }, want: false}, - {name: "do not tolerate node w/ known taints", args: args{ - node: testNode(taintAllWellKnown()), - tolerations: nil, - ignoreWellKnownTaints: false, - }, want: false}, - {name: "do not tolerate node w/ known taints 2", args: args{ - node: testNode(taintAllWellKnown()), - tolerations: tolerateRook(), - ignoreWellKnownTaints: false, - }, want: false}, - {name: "tolerate node w/ known taints and rook taint", args: args{ - node: testNode(taintAllWellKnown()), - tolerations: tolerateRook(), - ignoreWellKnownTaints: true, - }, want: true}, - {name: "do not tolerate node w/ known taints and rook taint", args: args{ - node: testNode(append(taintAllWellKnown(), taintReservedForRook())), - tolerations: nil, - ignoreWellKnownTaints: true, - }, want: false}, - {name: "tolerate node w/ known taints and rook taint", args: args{ - node: testNode(append(taintAllWellKnown(), taintReservedForRook())), - tolerations: tolerateRook(), - ignoreWellKnownTaints: true, - }, want: true}, - {name: "do not tolerate node w/ known and other taints", args: args{ - node: testNode(append(taintAllWellKnown(), taintReservedForRook(), taintReservedForOther())), - tolerations: tolerateRook(), - ignoreWellKnownTaints: true, - }, want: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := NodeIsTolerable(tt.args.node, tt.args.tolerations, tt.args.ignoreWellKnownTaints); got != tt.want { - t.Errorf("NodeIsTolerable() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestNodeIsReady(t *testing.T) { - assert.True(t, NodeIsReady(v1.Node{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionTrue}, - }}})) - assert.False(t, NodeIsReady(v1.Node{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionFalse}, - }}})) - assert.False(t, NodeIsReady(v1.Node{Status: 
v1.NodeStatus{Conditions: []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionUnknown}, - }}})) - // if `Ready` condition does not exist, must assume that node is not ready - assert.False(t, NodeIsReady(v1.Node{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{ - {Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}, - }}})) - // if `Ready` condition is not accompanied by a status, must assume that node is not ready - assert.False(t, NodeIsReady(v1.Node{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{ - {Type: v1.NodeDiskPressure}, - }}})) -} - -func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) { - ctx := context.TODO() - clientset := optest.New(t, 3) // create nodes 0, 1, and 2 - rookNodes := []cephv1.Node{} - - getNode := func(name string) v1.Node { - n, err := clientset.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}) - assert.NoError(t, err) - return *n - } - - // no rook nodes specified - nodes, err := GetKubernetesNodesMatchingRookNodes(rookNodes, clientset) - assert.NoError(t, err) - assert.Empty(t, nodes) - - // more rook nodes specified than nodes exist - rookNodes = []cephv1.Node{ - {Name: "node0"}, - {Name: "node2"}, - {Name: "node5"}} - nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset) - assert.NoError(t, err) - assert.Len(t, nodes, 2) - assert.Contains(t, nodes, getNode("node0")) - assert.Contains(t, nodes, getNode("node2")) - - // rook nodes match k8s nodes - rookNodes = []cephv1.Node{ - {Name: "node0"}, - {Name: "node1"}, - {Name: "node2"}} - nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset) - assert.NoError(t, err) - assert.Len(t, nodes, 3) - assert.Contains(t, nodes, getNode("node0")) - assert.Contains(t, nodes, getNode("node1")) - assert.Contains(t, nodes, getNode("node2")) - - // no k8s nodes exist - clientset = optest.New(t, 0) - nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset) - assert.NoError(t, err) - assert.Len(t, nodes, 0) -} - -func TestRookNodesMatchingKubernetesNodes(t *testing.T) { - ctx := context.TODO() - clientset := optest.New(t, 3) // create nodes 0, 1, and 2 - - getNode := func(name string) v1.Node { - n, err := clientset.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}) - assert.NoError(t, err) - return *n - } - n0 := getNode("node0") - n0.Labels = map[string]string{v1.LabelHostname: "node0-hostname"} - n1 := getNode("node1") - n2 := getNode("node2") - n2.Labels = map[string]string{v1.LabelHostname: "node2"} - k8sNodes := []v1.Node{n0, n1, n2} - - // no rook nodes specified for input - rookStorage := cephv1.StorageScopeSpec{ - Nodes: []cephv1.Node{}, - } - retNodes := RookNodesMatchingKubernetesNodes(rookStorage, k8sNodes) - assert.Len(t, retNodes, 0) - - // all rook nodes specified - rookStorage.Nodes = []cephv1.Node{ - {Name: "node0"}, - {Name: "node1"}, - {Name: "node2"}} - retNodes = RookNodesMatchingKubernetesNodes(rookStorage, k8sNodes) - assert.Len(t, retNodes, 3) - // this should return nodes named by hostname if that is available - assert.Contains(t, retNodes, cephv1.Node{Name: "node0-hostname"}) - assert.Contains(t, retNodes, cephv1.Node{Name: "node1"}) - assert.Contains(t, retNodes, cephv1.Node{Name: "node2"}) - - // more rook nodes specified than exist - rookStorage.Nodes = []cephv1.Node{ - {Name: "node0-hostname"}, - {Name: "node2"}, - {Name: "node5"}} - retNodes = RookNodesMatchingKubernetesNodes(rookStorage, k8sNodes) - assert.Len(t, retNodes, 2) - assert.Contains(t, retNodes, cephv1.Node{Name: "node0-hostname"}) - assert.Contains(t, 
retNodes, cephv1.Node{Name: "node2"}) - - // no k8s nodes specified - retNodes = RookNodesMatchingKubernetesNodes(rookStorage, []v1.Node{}) - assert.Len(t, retNodes, 0) -} - -func TestGenerateNodeAffinity(t *testing.T) { - type args struct { - nodeAffinity string - } - tests := []struct { - name string - args args - want *v1.NodeAffinity - wantErr bool - }{ - { - name: "GenerateNodeAffinity", - args: args{ - nodeAffinity: "rook.io/ceph=true", - }, - want: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "rook.io/ceph", - Operator: v1.NodeSelectorOpIn, - Values: []string{"true"}, - }, - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "FailGenerateNodeAffinity", - args: args{ - nodeAffinity: "rook.io/ceph,cassandra=true", - }, - want: nil, - wantErr: true, - }, - { - name: "GenerateNodeAffinityWithKeyOnly", - args: args{ - nodeAffinity: "rook.io/ceph", - }, - want: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "rook.io/ceph", - Operator: v1.NodeSelectorOpExists, - }, - }, - }, - }, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := GenerateNodeAffinity(tt.args.nodeAffinity) - if (err != nil) != tt.wantErr { - t.Errorf("GenerateNodeAffinity() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("GenerateNodeAffinity() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetNotReadyKubernetesNodes(t *testing.T) { - ctx := context.TODO() - clientset := optest.New(t, 0) - - //when there is no node - nodes, err := GetNotReadyKubernetesNodes(clientset) - assert.NoError(t, err) - assert.Equal(t, 0, len(nodes)) - - //when all the nodes are in ready state - clientset = optest.New(t, 2) - nodes, err = GetNotReadyKubernetesNodes(clientset) - assert.NoError(t, err) - assert.Equal(t, 0, len(nodes)) - - //when there is a not ready node - node := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "failed", - }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, Status: v1.ConditionFalse, - }, - }, - }, - } - _, err = clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) - assert.NoError(t, err) - nodes, err = GetNotReadyKubernetesNodes(clientset) - assert.NoError(t, err) - assert.Equal(t, 1, len(nodes)) - - // when all the nodes are not ready - allNodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - for _, n := range allNodes.Items { - n.Status.Conditions[0].Status = v1.ConditionFalse - updateNode := n - _, err := clientset.CoreV1().Nodes().Update(ctx, &updateNode, metav1.UpdateOptions{}) - assert.NoError(t, err) - } - nodes, err = GetNotReadyKubernetesNodes(clientset) - assert.NoError(t, err) - assert.Equal(t, 3, len(nodes)) -} diff --git a/pkg/operator/k8sutil/pod.go b/pkg/operator/k8sutil/pod.go index 8f1315a2a..9f68f86a6 100644 --- a/pkg/operator/k8sutil/pod.go +++ b/pkg/operator/k8sutil/pod.go @@ -27,7 +27,6 @@ import ( "strings" "github.com/pkg/errors" - "github.com/rook/rook/pkg/clusterd" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -347,33 +346,6 @@ func SetNodeAntiAffinityForPod(pod *v1.PodSpec, 
requiredDuringScheduling bool, t } } -func ForceDeletePodIfStuck(clusterdContext *clusterd.Context, pod v1.Pod) error { - ctx := context.TODO() - logger.Debugf("checking if pod %q is stuck and should be force deleted", pod.Name) - if pod.DeletionTimestamp.IsZero() { - logger.Debugf("skipping pod %q restart since the pod is not deleted", pod.Name) - return nil - } - node, err := clusterdContext.Clientset.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, "node status is not available") - } - if NodeIsReady(*node) { - logger.Debugf("skipping restart of pod %q since the node status is ready", pod.Name) - return nil - } - - logger.Infof("force deleting pod %q that appears to be stuck terminating", pod.Name) - var gracePeriod int64 - deleteOpts := metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod} - if err := clusterdContext.Clientset.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, deleteOpts); err != nil { - logger.Warningf("pod %q deletion failed. %v", pod.Name, err) - return nil - } - logger.Infof("pod %q deletion succeeded", pod.Name) - return nil -} - func RemoveDuplicateEnvVars(pod *v1.PodSpec) { for i := range pod.Containers { removeDuplicateEnvVarsFromContainer(&pod.Containers[i]) diff --git a/pkg/operator/k8sutil/pod_test.go b/pkg/operator/k8sutil/pod_test.go index 5bb8e9a56..479538d8f 100644 --- a/pkg/operator/k8sutil/pod_test.go +++ b/pkg/operator/k8sutil/pod_test.go @@ -16,13 +16,9 @@ limitations under the License. package k8sutil import ( - "context" - "fmt" "os" "testing" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/operator/test" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -173,95 +169,3 @@ func TestAddUnreachableNodeToleration(t *testing.T) { assert.Equal(t, expectedURToleration, podSpec.Tolerations[0]) } - -func testPodSpecPlacement(t *testing.T, requiredDuringScheduling bool, req, pref int, placement *cephv1.Placement) { - spec := v1.PodSpec{ - InitContainers: []v1.Container{}, - Containers: []v1.Container{}, - RestartPolicy: v1.RestartPolicyAlways, - } - - placement.ApplyToPodSpec(&spec) - SetNodeAntiAffinityForPod(&spec, requiredDuringScheduling, v1.LabelHostname, map[string]string{"app": "mon"}, nil) - - // should have a required anti-affinity and no preferred anti-affinity - assert.Equal(t, - req, - len(spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution)) -} - -func makePlacement() cephv1.Placement { - return cephv1.Placement{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ - { - TopologyKey: v1.LabelZoneFailureDomain, - }, - }, - PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ - { - PodAffinityTerm: v1.PodAffinityTerm{ - TopologyKey: v1.LabelZoneFailureDomain, - }, - }, - }, - }, - } -} - -func TestPodSpecPlacement(t *testing.T) { - // no placement settings in the crd - p := cephv1.Placement{} - testPodSpecPlacement(t, true, 1, 0, &p) - testPodSpecPlacement(t, false, 0, 1, &p) - testPodSpecPlacement(t, false, 0, 0, &p) - - // crd has other preferred and required anti-affinity setting - p = makePlacement() - testPodSpecPlacement(t, true, 2, 1, &p) - p = makePlacement() - testPodSpecPlacement(t, false, 1, 2, &p) -} - -func TestIsMonScheduled(t *testing.T) { - ctx := context.TODO() - clientset := test.New(t, 1) - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mon-pod", - 
Namespace: "ns", - Labels: map[string]string{ - "app": "rook-ceph-mon", - "ceph_daemon_id": "a", - }, - }, - } - - // no pods running - isScheduled, err := IsPodScheduled(clientset, "ns", "a") - assert.Error(t, err) - assert.False(t, isScheduled) - - selector := fmt.Sprintf("%s=%s,%s=%s", AppAttr, "rook-ceph-mon", "ceph_daemon_id", "a") - - // unscheduled pod - _, err = clientset.CoreV1().Pods("ns").Create(ctx, &pod, metav1.CreateOptions{}) - assert.NoError(t, err) - isScheduled, err = IsPodScheduled(clientset, "ns", selector) - assert.NoError(t, err) - assert.False(t, isScheduled) - - // scheduled pod - pod.Spec.NodeName = "node0" - _, err = clientset.CoreV1().Pods("ns").Update(ctx, &pod, metav1.UpdateOptions{}) - assert.NoError(t, err) - isScheduled, err = IsPodScheduled(clientset, "ns", selector) - assert.NoError(t, err) - assert.True(t, isScheduled) - - // no pods found - assert.NoError(t, err) - isScheduled, err = IsPodScheduled(clientset, "ns", "b") - assert.Error(t, err) - assert.False(t, isScheduled) -} diff --git a/pkg/operator/k8sutil/prometheus.go b/pkg/operator/k8sutil/prometheus.go deleted file mode 100644 index cf27f074f..000000000 --- a/pkg/operator/k8sutil/prometheus.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package k8sutil for Kubernetes helpers. -package k8sutil - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "path/filepath" - - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - monitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8sYAML "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/tools/clientcmd" -) - -func getMonitoringClient() (*monitoringclient.Clientset, error) { - cfg, err := clientcmd.BuildConfigFromFlags("", "") - if err != nil { - return nil, fmt.Errorf("failed to build config. %v", err) - } - client, err := monitoringclient.NewForConfig(cfg) - if err != nil { - return nil, fmt.Errorf("failed to get monitoring client. %v", err) - } - return client, nil -} - -// GetServiceMonitor returns servicemonitor or an error -func GetServiceMonitor(filePath string) (*monitoringv1.ServiceMonitor, error) { - file, err := ioutil.ReadFile(filepath.Clean(filePath)) - if err != nil { - return nil, fmt.Errorf("servicemonitor file could not be fetched. %v", err) - } - var servicemonitor monitoringv1.ServiceMonitor - err = k8sYAML.NewYAMLOrJSONDecoder(bytes.NewBufferString(string(file)), 1000).Decode(&servicemonitor) - if err != nil { - return nil, fmt.Errorf("servicemonitor could not be decoded. 
%v", err) - } - return &servicemonitor, nil -} - -// CreateOrUpdateServiceMonitor creates serviceMonitor object or an error -func CreateOrUpdateServiceMonitor(serviceMonitorDefinition *monitoringv1.ServiceMonitor) (*monitoringv1.ServiceMonitor, error) { - ctx := context.TODO() - name := serviceMonitorDefinition.GetName() - namespace := serviceMonitorDefinition.GetNamespace() - logger.Debugf("creating servicemonitor %s", name) - client, err := getMonitoringClient() - if err != nil { - return nil, fmt.Errorf("failed to get monitoring client. %v", err) - } - oldSm, err := client.MonitoringV1().ServiceMonitors(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - sm, err := client.MonitoringV1().ServiceMonitors(namespace).Create(ctx, serviceMonitorDefinition, metav1.CreateOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to create servicemonitor. %v", err) - } - return sm, nil - } - return nil, fmt.Errorf("failed to retrieve servicemonitor. %v", err) - } - oldSm.Spec = serviceMonitorDefinition.Spec - sm, err := client.MonitoringV1().ServiceMonitors(namespace).Update(ctx, oldSm, metav1.UpdateOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to update servicemonitor. %v", err) - } - return sm, nil -} - -// GetPrometheusRule returns provided prometheus rules or an error -func GetPrometheusRule(ruleFilePath string) (*monitoringv1.PrometheusRule, error) { - ruleFile, err := ioutil.ReadFile(filepath.Clean(ruleFilePath)) - if err != nil { - return nil, fmt.Errorf("prometheusRules file could not be fetched. %v", err) - } - var rule monitoringv1.PrometheusRule - err = k8sYAML.NewYAMLOrJSONDecoder(bytes.NewBufferString(string(ruleFile)), 1000).Decode(&rule) - if err != nil { - return nil, fmt.Errorf("prometheusRules could not be decoded. %v", err) - } - return &rule, nil -} - -// CreateOrUpdatePrometheusRule creates a prometheusRule object or an error -func CreateOrUpdatePrometheusRule(prometheusRule *monitoringv1.PrometheusRule) (*monitoringv1.PrometheusRule, error) { - ctx := context.TODO() - name := prometheusRule.GetName() - namespace := prometheusRule.GetNamespace() - logger.Debugf("creating prometheusRule %s", name) - client, err := getMonitoringClient() - if err != nil { - return nil, fmt.Errorf("failed to get monitoring client. %v", err) - } - promRule, err := client.MonitoringV1().PrometheusRules(namespace).Create(ctx, prometheusRule, metav1.CreateOptions{}) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return nil, fmt.Errorf("failed to create prometheusRules. %v", err) - } - // Get current PrometheusRule so the ResourceVersion can be set as needed - // for the object update operation - promRule, err := client.MonitoringV1().PrometheusRules(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to get prometheusRule object. %v", err) - } - promRule.Spec = prometheusRule.Spec - _, err = client.MonitoringV1().PrometheusRules(namespace).Update(ctx, promRule, metav1.UpdateOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to update prometheusRule. %v", err) - } - } - return promRule, nil -} diff --git a/pkg/operator/k8sutil/prometheus_test.go b/pkg/operator/k8sutil/prometheus_test.go deleted file mode 100644 index db9ad42cb..000000000 --- a/pkg/operator/k8sutil/prometheus_test.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package k8sutil for Kubernetes helpers. -package k8sutil - -import ( - "path" - "testing" - - "github.com/rook/rook/pkg/util" - "github.com/stretchr/testify/assert" -) - -func TestGetServiceMonitor(t *testing.T) { - projectRoot := util.PathToProjectRoot() - filePath := path.Join(projectRoot, "/cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml") - servicemonitor, err := GetServiceMonitor(filePath) - assert.Nil(t, err) - assert.Equal(t, "rook-ceph-mgr", servicemonitor.GetName()) - assert.Equal(t, "rook-ceph", servicemonitor.GetNamespace()) - assert.NotNil(t, servicemonitor.Spec.NamespaceSelector.MatchNames) - assert.NotNil(t, servicemonitor.Spec.Endpoints) -} - -func TestGetPrometheusRule(t *testing.T) { - projectRoot := util.PathToProjectRoot() - filePath := path.Join(projectRoot, "/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml") - rules, err := GetPrometheusRule(filePath) - assert.Nil(t, err) - assert.Equal(t, "prometheus-ceph-rules", rules.GetName()) - assert.Equal(t, "rook-ceph", rules.GetNamespace()) - // Labels should be present as they are used by prometheus for identifying rules - assert.NotNil(t, rules.GetLabels()) - assert.NotNil(t, rules.Spec.Groups) -} diff --git a/pkg/operator/k8sutil/test/deployment.go b/pkg/operator/k8sutil/test/deployment.go index 6057c88e7..6ac0fe2b2 100644 --- a/pkg/operator/k8sutil/test/deployment.go +++ b/pkg/operator/k8sutil/test/deployment.go @@ -1,31 +1,9 @@ package test import ( - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/daemon/ceph/client" apps "k8s.io/api/apps/v1" ) -// UpdateDeploymentAndWaitStub returns a stub replacement for the UpdateDeploymentAndWait function -// for unit tests which always returns success (nil). The generated simple clientset doesn't seem to -// handle the Deployment.Update method as expected. The deployment is instead zero-ed out when the -// deployment is updated with an unchanged version, which breaks unit tests. -// In order to still test the UpdateDeploymentAndWait function, the stub function returned will -// append a copy of the deployment used as input to the list of deployments updated. The function -// returns a pointer to this slice which the calling func may use to verify the expected contents of -// deploymentsUpdated based on expected behavior. 
-func UpdateDeploymentAndWaitStub() ( - stubFunc func(context *clusterd.Context, clusterInfo *client.ClusterInfo, deployment *apps.Deployment, daemonType, daemonName string, skipUpgradeChecks, continueUpgradeAfterChecksEvenIfNotHealthy bool) error, - deploymentsUpdated *[]*apps.Deployment, -) { - deploymentsUpdated = &[]*apps.Deployment{} - stubFunc = func(context *clusterd.Context, clusterInfo *client.ClusterInfo, deployment *apps.Deployment, daemonType, daemonName string, skipUpgradeChecks, continueUpgradeAfterChecksEvenIfNotHealthy bool) error { - *deploymentsUpdated = append(*deploymentsUpdated, deployment) - return nil - } - return stubFunc, deploymentsUpdated -} - // DeploymentNamesUpdated converts a deploymentsUpdated slice into a string slice of deployment names func DeploymentNamesUpdated(deploymentsUpdated *[]*apps.Deployment) []string { ns := []string{} diff --git a/pkg/operator/nfs/controller.go b/pkg/operator/nfs/controller.go deleted file mode 100644 index 68a7c42c4..000000000 --- a/pkg/operator/nfs/controller.go +++ /dev/null @@ -1,323 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "context" - "fmt" - "path" - "strings" - "time" - - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - - "github.com/coreos/pkg/capnslog" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - nfsConfigMapPath = "/nfs-ganesha/config" - nfsPort = 2049 - rpcPort = 111 -) - -type NFSServerReconciler struct { - client.Client - Context *clusterd.Context - Scheme *runtime.Scheme - Log *capnslog.PackageLogger - Recorder record.EventRecorder -} - -func (r *NFSServerReconciler) Reconcile(context context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - - instance := &nfsv1alpha1.NFSServer{} - if err := r.Client.Get(context, req.NamespacedName, instance); err != nil { - if errors.IsNotFound(err) { - return reconcile.Result{}, nil - } - - return reconcile.Result{}, err - } - - // Initialize patcher utility and store the initial cr object state to be compare later. - patcher, err := k8sutil.NewPatcher(instance, r.Client) - if err != nil { - return reconcile.Result{}, err - } - - defer func() { - // Always patch the cr object if any changes at the end of each reconciliation. - if err := patcher.Patch(context, instance); err != nil && reterr == nil { - reterr = err - } - }() - - // Add Finalizer if not present - controllerutil.AddFinalizer(instance, nfsv1alpha1.Finalizer) - - // Handle for deletion. 
Just remove finalizer - if !instance.DeletionTimestamp.IsZero() { - r.Log.Infof("Deleting NFSServer %s in %s namespace", instance.Name, instance.Namespace) - - // no operation since we don't need do anything when nfsserver deleted. - controllerutil.RemoveFinalizer(instance, nfsv1alpha1.Finalizer) - } - - // Check status state. if it's empty then initialize it - // otherwise if has error state then skip reconciliation to prevent requeue on error. - switch instance.Status.State { - case "": - instance.Status.State = nfsv1alpha1.StateInitializing - r.Log.Info("Initialize status state") - return reconcile.Result{Requeue: true}, nil - case nfsv1alpha1.StateError: - r.Log.Info("Error state detected, skip reconciliation") - return reconcile.Result{Requeue: false}, nil - } - - // Validate cr spec and give warning event when validation fail. - if err := instance.ValidateSpec(); err != nil { - r.Recorder.Eventf(instance, corev1.EventTypeWarning, nfsv1alpha1.EventFailed, "Invalid NFSServer spec: %+v", err) - r.Log.Errorf("Invalid NFSServer spec: %+v", err) - instance.Status.State = nfsv1alpha1.StateError - return reconcile.Result{}, err - } - - if err := r.reconcileNFSServerConfig(context, instance); err != nil { - r.Recorder.Eventf(instance, corev1.EventTypeWarning, nfsv1alpha1.EventFailed, "Failed reconciling nfsserver config: %+v", err) - r.Log.Errorf("Error reconciling nfsserver config: %+v", err) - return reconcile.Result{}, err - } - - if err := r.reconcileNFSServer(context, instance); err != nil { - r.Recorder.Eventf(instance, corev1.EventTypeWarning, nfsv1alpha1.EventFailed, "Failed reconciling nfsserver: %+v", err) - r.Log.Errorf("Error reconciling nfsserver: %+v", err) - return reconcile.Result{}, err - } - - // Reconcile status state based on statefulset ready replicas. 
- sts := &appsv1.StatefulSet{} - if err := r.Client.Get(context, req.NamespacedName, sts); err != nil { - return reconcile.Result{}, client.IgnoreNotFound(err) - } - - switch int(sts.Status.ReadyReplicas) { - case instance.Spec.Replicas: - instance.Status.State = nfsv1alpha1.StateRunning - return reconcile.Result{}, nil - default: - instance.Status.State = nfsv1alpha1.StatePending - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } -} - -func (r *NFSServerReconciler) reconcileNFSServerConfig(ctx context.Context, cr *nfsv1alpha1.NFSServer) error { - var exportsList []string - - id := 10 - for _, export := range cr.Spec.Exports { - claimName := export.PersistentVolumeClaim.ClaimName - var accessType string - // validateNFSServerSpec guarantees `access` will be one of these values at this point - switch strings.ToLower(export.Server.AccessMode) { - case "readwrite": - accessType = "RW" - case "readonly": - accessType = "RO" - case "none": - accessType = "None" - } - - nfsGaneshaConfig := ` -EXPORT { - Export_Id = ` + fmt.Sprintf("%v", id) + `; - Path = ` + path.Join("/", claimName) + `; - Pseudo = ` + path.Join("/", claimName) + `; - Protocols = 4; - Transports = TCP; - Sectype = sys; - Access_Type = ` + accessType + `; - Squash = ` + strings.ToLower(export.Server.Squash) + `; - FSAL { - Name = VFS; - } -}` - - exportsList = append(exportsList, nfsGaneshaConfig) - id++ - } - - nfsGaneshaAdditionalConfig := ` -NFS_Core_Param { - fsid_device = true; -} -` - - exportsList = append(exportsList, nfsGaneshaAdditionalConfig) - configdata := make(map[string]string) - configdata[cr.Name] = strings.Join(exportsList, "\n") - cm := newConfigMapForNFSServer(cr) - cmop, err := controllerutil.CreateOrUpdate(ctx, r.Client, cm, func() error { - if err := controllerutil.SetOwnerReference(cr, cm, r.Scheme); err != nil { - return err - } - - cm.Data = configdata - return nil - }) - - if err != nil { - return err - } - - r.Log.Info("Reconciling NFSServer ConfigMap", "Operation.Result ", cmop) - switch cmop { - case controllerutil.OperationResultCreated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventCreated, "%s nfs-server config configmap: %s", strings.Title(string(cmop)), cm.Name) - return nil - case controllerutil.OperationResultUpdated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventUpdated, "%s nfs-server config configmap: %s", strings.Title(string(cmop)), cm.Name) - return nil - default: - return nil - } -} - -func (r *NFSServerReconciler) reconcileNFSServer(ctx context.Context, cr *nfsv1alpha1.NFSServer) error { - svc := newServiceForNFSServer(cr) - svcop, err := controllerutil.CreateOrUpdate(ctx, r.Client, svc, func() error { - if !svc.ObjectMeta.CreationTimestamp.IsZero() { - return nil - } - - if err := controllerutil.SetControllerReference(cr, svc, r.Scheme); err != nil { - return err - } - - return nil - }) - - if err != nil { - return err - } - - r.Log.Info("Reconciling NFSServer Service", "Operation.Result ", svcop) - switch svcop { - case controllerutil.OperationResultCreated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventCreated, "%s nfs-server service: %s", strings.Title(string(svcop)), svc.Name) - case controllerutil.OperationResultUpdated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventUpdated, "%s nfs-server service: %s", strings.Title(string(svcop)), svc.Name) - } - - sts, err := newStatefulSetForNFSServer(cr, r.Context.Clientset, ctx) - if err != nil { - return fmt.Errorf("unable to generate the NFS 
StatefulSet spec: %v", err) - } - - stsop, err := controllerutil.CreateOrUpdate(ctx, r.Client, sts, func() error { - if sts.ObjectMeta.CreationTimestamp.IsZero() { - sts.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: newLabels(cr), - } - } - - if err := controllerutil.SetControllerReference(cr, sts, r.Scheme); err != nil { - return err - } - - volumes := []corev1.Volume{ - { - Name: cr.Name, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: cr.Name, - }, - Items: []corev1.KeyToPath{ - { - Key: cr.Name, - Path: cr.Name, - }, - }, - DefaultMode: pointer.Int32Ptr(corev1.ConfigMapVolumeSourceDefaultMode), - }, - }, - }, - } - volumeMounts := []corev1.VolumeMount{ - { - Name: cr.Name, - MountPath: nfsConfigMapPath, - }, - } - for _, export := range cr.Spec.Exports { - shareName := export.Name - claimName := export.PersistentVolumeClaim.ClaimName - volumes = append(volumes, corev1.Volume{ - Name: shareName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: claimName, - }, - }, - }) - - volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: shareName, - MountPath: path.Join("/", claimName), - }) - } - - sts.Spec.Template.Spec.Volumes = volumes - for i, container := range sts.Spec.Template.Spec.Containers { - if container.Name == "nfs-server" || container.Name == "nfs-provisioner" { - sts.Spec.Template.Spec.Containers[i].VolumeMounts = volumeMounts - } - } - - return nil - }) - - if err != nil { - return err - } - - r.Log.Info("Reconciling NFSServer StatefulSet", "Operation.Result ", stsop) - switch stsop { - case controllerutil.OperationResultCreated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventCreated, "%s nfs-server statefulset: %s", strings.Title(string(stsop)), sts.Name) - return nil - case controllerutil.OperationResultUpdated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventUpdated, "%s nfs-server statefulset: %s", strings.Title(string(stsop)), sts.Name) - return nil - default: - return nil - } -} diff --git a/pkg/operator/nfs/controller_test.go b/pkg/operator/nfs/controller_test.go deleted file mode 100644 index 67159efff..000000000 --- a/pkg/operator/nfs/controller_test.go +++ /dev/null @@ -1,309 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nfs - -import ( - "context" - "os" - "path" - "reflect" - "testing" - - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -type resourceGenerator interface { - WithExports(exportName, serverAccessMode, serverSquashType, pvcName string) resourceGenerator - WithState(state nfsv1alpha1.NFSServerState) resourceGenerator - Generate() *nfsv1alpha1.NFSServer -} - -type resource struct { - name string - namespace string - exports []nfsv1alpha1.ExportsSpec - state nfsv1alpha1.NFSServerState -} - -func newCustomResource(namespacedName types.NamespacedName) resourceGenerator { - return &resource{ - name: namespacedName.Name, - namespace: namespacedName.Namespace, - } -} - -func (r *resource) WithExports(exportName, serverAccessMode, serverSquashType, pvcName string) resourceGenerator { - r.exports = append(r.exports, nfsv1alpha1.ExportsSpec{ - Name: exportName, - Server: nfsv1alpha1.ServerSpec{ - AccessMode: serverAccessMode, - Squash: serverSquashType, - }, - PersistentVolumeClaim: corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvcName, - }, - }) - - return r -} - -func (r *resource) WithState(state nfsv1alpha1.NFSServerState) resourceGenerator { - r.state = state - return r -} - -func (r *resource) Generate() *nfsv1alpha1.NFSServer { - return &nfsv1alpha1.NFSServer{ - ObjectMeta: metav1.ObjectMeta{ - Name: r.name, - Namespace: r.namespace, - }, - Spec: nfsv1alpha1.NFSServerSpec{ - Replicas: 1, - Exports: r.exports, - }, - Status: nfsv1alpha1.NFSServerStatus{ - State: r.state, - }, - } -} - -func TestNFSServerReconciler_Reconcile(t *testing.T) { - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.PodNameEnvVar, "rook-operator") - defer os.Unsetenv(k8sutil.PodNameEnvVar) - - ctx := context.TODO() - clientset := test.New(t, 3) - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-operator", - Namespace: "rook-system", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "mypodContainer", - Image: "rook/test", - }, - }, - }, - } - _, err := clientset.CoreV1().Pods(pod.Namespace).Create(ctx, &pod, metav1.CreateOptions{}) - if err != nil { - t.Errorf("Error creating the rook-operator pod: %v", err) - } - clusterdContext := &clusterd.Context{Clientset: clientset} - - expectedServerFunc := func(scheme *runtime.Scheme, cr *nfsv1alpha1.NFSServer) *appsv1.StatefulSet { - sts, err := newStatefulSetForNFSServer(cr, clientset, ctx) - if err != nil { - t.Errorf("Error creating the expectedServerFunc: %v", err) - return nil - } - sts.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: newLabels(cr), - } - _ = controllerutil.SetControllerReference(cr, sts, scheme) - volumes := []corev1.Volume{ - { - Name: cr.Name, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: 
cr.Name, - }, - Items: []corev1.KeyToPath{ - { - Key: cr.Name, - Path: cr.Name, - }, - }, - DefaultMode: pointer.Int32Ptr(corev1.ConfigMapVolumeSourceDefaultMode), - }, - }, - }, - } - volumeMounts := []corev1.VolumeMount{ - { - Name: cr.Name, - MountPath: nfsConfigMapPath, - }, - } - for _, export := range cr.Spec.Exports { - shareName := export.Name - claimName := export.PersistentVolumeClaim.ClaimName - volumes = append(volumes, corev1.Volume{ - Name: shareName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: claimName, - }, - }, - }) - - volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: shareName, - MountPath: path.Join("/", claimName), - }) - } - sts.Status.ReadyReplicas = int32(cr.Spec.Replicas) - sts.Spec.Template.Spec.Volumes = volumes - for i, container := range sts.Spec.Template.Spec.Containers { - if container.Name == "nfs-server" || container.Name == "nfs-provisioner" { - sts.Spec.Template.Spec.Containers[i].VolumeMounts = volumeMounts - } - } - - return sts - } - - expectedServerServiceFunc := func(scheme *runtime.Scheme, cr *nfsv1alpha1.NFSServer) *corev1.Service { - svc := newServiceForNFSServer(cr) - _ = controllerutil.SetControllerReference(cr, svc, scheme) - return svc - } - - rr := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: "nfs-server", - Namespace: "nfs-server", - }, - } - - type args struct { - req ctrl.Request - } - tests := []struct { - name string - args args - cr *nfsv1alpha1.NFSServer - want ctrl.Result - wantErr bool - }{ - { - name: "Reconcile NFS Server Should Set Initializing State when State is Empty", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").Generate(), - want: reconcile.Result{Requeue: true}, - }, - { - name: "Reconcile NFS Server Shouldn't Requeue when State is Error", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").WithState(nfsv1alpha1.StateError).Generate(), - want: reconcile.Result{Requeue: false}, - }, - { - name: "Reconcile NFS Server Should Error on Duplicate Export", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").WithExports("share1", "ReadWrite", "none", "test-claim").WithState(nfsv1alpha1.StateInitializing).Generate(), - wantErr: true, - }, - { - name: "Reconcile NFS Server With Single Export", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").WithState(nfsv1alpha1.StateInitializing).Generate(), - }, - { - name: "Reconcile NFS Server With Multiple Export", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").WithExports("share2", "ReadOnly", "none", "another-test-claim").WithState(nfsv1alpha1.StateInitializing).Generate(), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scheme := clientgoscheme.Scheme - scheme.AddKnownTypes(nfsv1alpha1.SchemeGroupVersion, tt.cr) - - expectedServer := expectedServerFunc(scheme, tt.cr) - expectedServerService := expectedServerServiceFunc(scheme, tt.cr) - - objs := []runtime.Object{ - tt.cr, - expectedServer, - expectedServerService, - } - - expectedServer.GetObjectKind().SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) - 
expectedServerService.GetObjectKind().SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) - - fc := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() - fr := record.NewFakeRecorder(2) - - r := &NFSServerReconciler{ - Context: clusterdContext, - Client: fc, - Scheme: scheme, - Log: logger, - Recorder: fr, - } - got, err := r.Reconcile(context.TODO(), tt.args.req) - if (err != nil) != tt.wantErr { - t.Errorf("NFSServerReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("NFSServerReconciler.Reconcile() = %v, want %v", got, tt.want) - } - - gotServer := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), tt.args.req.NamespacedName, gotServer); err != nil { - t.Errorf("NFSServerReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(gotServer, expectedServer) { - t.Errorf("NFSServerReconciler.Reconcile() = %v, want %v", gotServer, expectedServer) - } - - gotServerService := &corev1.Service{} - if err := fc.Get(context.Background(), tt.args.req.NamespacedName, gotServerService); err != nil { - t.Errorf("NFSServerReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(gotServerService, expectedServerService) { - t.Errorf("NFSServerReconciler.Reconcile() = %v, want %v", gotServerService, expectedServerService) - } - }) - } -} diff --git a/pkg/operator/nfs/operator.go b/pkg/operator/nfs/operator.go deleted file mode 100644 index 36c534087..000000000 --- a/pkg/operator/nfs/operator.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package nfs operator to manage NFS Server. -package nfs - -import ( - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - - "github.com/coreos/pkg/capnslog" - "github.com/rook/rook/pkg/clusterd" - "k8s.io/apimachinery/pkg/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" -) - -var ( - scheme = runtime.NewScheme() - controllerName = "nfs-operator" - logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) -) - -// Operator type for managing NFS Server. -type Operator struct { - context *clusterd.Context -} - -func init() { - _ = clientgoscheme.AddToScheme(scheme) - _ = nfsv1alpha1.AddToScheme(scheme) -} - -// New creates an operator instance. -func New(context *clusterd.Context) *Operator { - return &Operator{ - context: context, - } -} - -// Run the operator instance. -func (o *Operator) Run() error { - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - }) - if err != nil { - return err - } - - reconciler := &NFSServerReconciler{ - Client: mgr.GetClient(), - Context: o.context, - Log: logger, - Scheme: scheme, - Recorder: mgr.GetEventRecorderFor(controllerName), - } - - if err := ctrl.NewControllerManagedBy(mgr). - For(&nfsv1alpha1.NFSServer{}). 
- Complete(reconciler); err != nil { - return err - } - - logger.Info("starting manager") - return mgr.Start(ctrl.SetupSignalHandler()) -} diff --git a/pkg/operator/nfs/provisioner.go b/pkg/operator/nfs/provisioner.go deleted file mode 100644 index 5cf5b76df..000000000 --- a/pkg/operator/nfs/provisioner.go +++ /dev/null @@ -1,283 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "context" - "fmt" - "os" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/pkg/errors" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned" - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/component-helpers/storage/volume" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - nfsServerNameSCParam = "nfsServerName" - nfsServerNamespaceSCParam = "nfsServerNamespace" - exportNameSCParam = "exportName" - projectBlockAnnotationKey = "nfs.rook.io/project_block" -) - -var ( - mountPath = "/" -) - -type Provisioner struct { - client kubernetes.Interface - rookClient rookclient.Interface - quotaer Quotaer -} - -var _ controller.Provisioner = &Provisioner{} - -// NewNFSProvisioner returns an instance of nfsProvisioner -func NewNFSProvisioner(clientset kubernetes.Interface, rookClientset rookclient.Interface) (*Provisioner, error) { - quotaer, err := NewProjectQuota() - if err != nil { - return nil, err - } - - return &Provisioner{ - client: clientset, - rookClient: rookClientset, - quotaer: quotaer, - }, nil -} - -// Provision(context.Context, ProvisionOptions) (*v1.PersistentVolume, ProvisioningState, error) -func (p *Provisioner) Provision(ctx context.Context, options controller.ProvisionOptions) (*v1.PersistentVolume, controller.ProvisioningState, error) { - logger.Infof("nfs provisioner: ProvisionOptions %v", options) - annotations := make(map[string]string) - - if options.PVC.Spec.Selector != nil { - return nil, controller.ProvisioningFinished, fmt.Errorf("claim Selector is not supported") - } - - sc, err := p.storageClassForPVC(ctx, options.PVC) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - serverName, present := sc.Parameters[nfsServerNameSCParam] - if !present { - return nil, controller.ProvisioningFinished, errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - serverNamespace, present := sc.Parameters[nfsServerNamespaceSCParam] - if !present { - return nil, controller.ProvisioningFinished, errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - exportName, present := sc.Parameters[exportNameSCParam] - if !present { - return nil, controller.ProvisioningFinished, errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - nfsserver, err := p.rookClient.NfsV1alpha1().NFSServers(serverNamespace).Get(ctx, serverName, metav1.GetOptions{}) - if err != nil 
{ - return nil, controller.ProvisioningFinished, err - } - - nfsserversvc, err := p.client.CoreV1().Services(serverNamespace).Get(ctx, serverName, metav1.GetOptions{}) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - var ( - exportPath string - found bool - ) - - for _, export := range nfsserver.Spec.Exports { - if export.Name == exportName { - exportPath = path.Join(mountPath, export.PersistentVolumeClaim.ClaimName) - found = true - } - } - - if !found { - return nil, controller.ProvisioningFinished, fmt.Errorf("No export name from storageclass is match with NFSServer %s in namespace %s", nfsserver.Name, nfsserver.Namespace) - } - - pvName := strings.Join([]string{options.PVC.Namespace, options.PVC.Name, options.PVName}, "-") - fullPath := path.Join(exportPath, pvName) - if err := os.MkdirAll(fullPath, 0700); err != nil { - return nil, controller.ProvisioningFinished, errors.New("unable to create directory to provision new pv: " + err.Error()) - } - - capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - block, err := p.createQuota(exportPath, fullPath, strconv.FormatInt(capacity.Value(), 10)) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - annotations[projectBlockAnnotationKey] = block - - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: options.PVName, - Annotations: annotations, - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: *options.StorageClass.ReclaimPolicy, - AccessModes: options.PVC.Spec.AccessModes, - MountOptions: options.StorageClass.MountOptions, - Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): capacity, - }, - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: nfsserversvc.Spec.ClusterIP, - Path: fullPath, - ReadOnly: false, - }, - }, - }, - } - - return pv, controller.ProvisioningFinished, nil -} - -func (p *Provisioner) Delete(ctx context.Context, volume *v1.PersistentVolume) error { - nfsPath := volume.Spec.PersistentVolumeSource.NFS.Path - pvName := path.Base(nfsPath) - - sc, err := p.storageClassForPV(ctx, volume) - if err != nil { - return err - } - - serverName, present := sc.Parameters[nfsServerNameSCParam] - if !present { - return errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - serverNamespace, present := sc.Parameters[nfsServerNamespaceSCParam] - if !present { - return errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - exportName, present := sc.Parameters[exportNameSCParam] - if !present { - return errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - nfsserver, err := p.rookClient.NfsV1alpha1().NFSServers(serverNamespace).Get(ctx, serverName, metav1.GetOptions{}) - if err != nil { - return err - } - - var ( - exportPath string - found bool - ) - - for _, export := range nfsserver.Spec.Exports { - if export.Name == exportName { - exportPath = path.Join(mountPath, export.PersistentVolumeClaim.ClaimName) - found = true - } - } - - if !found { - return fmt.Errorf("No export name from storageclass is match with NFSServer %s in namespace %s", nfsserver.Name, nfsserver.Namespace) - } - - block, ok := volume.Annotations[projectBlockAnnotationKey] - if !ok { - return fmt.Errorf("PV doesn't have an annotation with key %s", projectBlockAnnotationKey) - } - - if err := p.removeQuota(exportPath, block); err != nil { - return err - } - - fullPath := path.Join(exportPath, pvName) - 
return os.RemoveAll(fullPath) -} - -func (p *Provisioner) createQuota(exportPath, directory string, limit string) (string, error) { - projectsFile := filepath.Join(exportPath, "projects") - if _, err := os.Stat(projectsFile); err != nil { - if os.IsNotExist(err) { - return "", nil - } - - return "", fmt.Errorf("error checking projects file in directory %s: %v", exportPath, err) - } - - return p.quotaer.CreateProjectQuota(projectsFile, directory, limit) -} - -func (p *Provisioner) removeQuota(exportPath, block string) error { - var projectID uint16 - projectsFile := filepath.Join(exportPath, "projects") - if _, err := os.Stat(projectsFile); err != nil { - if os.IsNotExist(err) { - return nil - } - - return fmt.Errorf("error checking projects file in directory %s: %v", exportPath, err) - } - - re := regexp.MustCompile("(?m:^([0-9]+):(.+):(.+)$)") - allMatches := re.FindAllStringSubmatch(block, -1) - for _, match := range allMatches { - digits := match[1] - if id, err := strconv.ParseUint(string(digits), 10, 16); err == nil { - projectID = uint16(id) - } - } - - return p.quotaer.RemoveProjectQuota(projectID, projectsFile, block) -} - -func (p *Provisioner) storageClassForPV(ctx context.Context, pv *v1.PersistentVolume) (*storagev1.StorageClass, error) { - if p.client == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - className := volume.GetPersistentVolumeClass(pv) - if className == "" { - return nil, fmt.Errorf("Volume has no storage class") - } - - return p.client.StorageV1().StorageClasses().Get(ctx, className, metav1.GetOptions{}) -} - -func (p *Provisioner) storageClassForPVC(ctx context.Context, pvc *v1.PersistentVolumeClaim) (*storagev1.StorageClass, error) { - if p.client == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - className := volume.GetPersistentVolumeClaimClass(pvc) - if className == "" { - return nil, fmt.Errorf("Volume has no storage class") - } - - return p.client.StorageV1().StorageClasses().Get(ctx, className, metav1.GetOptions{}) -} diff --git a/pkg/operator/nfs/provisioner_test.go b/pkg/operator/nfs/provisioner_test.go deleted file mode 100644 index 8c71f994f..000000000 --- a/pkg/operator/nfs/provisioner_test.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nfs - -import ( - "context" - "os" - "reflect" - "testing" - - rookclient "github.com/rook/rook/pkg/client/clientset/versioned" - rookclientfake "github.com/rook/rook/pkg/client/clientset/versioned/fake" - corev1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - apiresource "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - k8sclientfake "k8s.io/client-go/kubernetes/fake" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" -) - -func init() { - mountPath = "/tmp/test-rook-nfs" -} - -func newDummyStorageClass(name string, nfsServerNamespacedName types.NamespacedName, reclaimPolicy corev1.PersistentVolumeReclaimPolicy) *storagev1.StorageClass { - return &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Parameters: map[string]string{ - nfsServerNameSCParam: nfsServerNamespacedName.Name, - nfsServerNamespaceSCParam: nfsServerNamespacedName.Namespace, - exportNameSCParam: name, - }, - ReclaimPolicy: &reclaimPolicy, - } -} - -func newDummyPVC(name, namespace string, capacity apiresource.Quantity, storageClassName string) *corev1.PersistentVolumeClaim { - return &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceName(corev1.ResourceStorage): capacity, - }, - }, - StorageClassName: &storageClassName, - }, - } -} - -func newDummyPV(name, scName, expectedPath string, expectedCapacity apiresource.Quantity, expectedReclaimPolicy corev1.PersistentVolumeReclaimPolicy) *corev1.PersistentVolume { - annotations := make(map[string]string) - annotations[projectBlockAnnotationKey] = "" - return &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Annotations: annotations, - }, - Spec: corev1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: expectedReclaimPolicy, - Capacity: corev1.ResourceList{ - corev1.ResourceName(corev1.ResourceStorage): expectedCapacity, - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - NFS: &corev1.NFSVolumeSource{ - Path: expectedPath, - }, - }, - StorageClassName: scName, - }, - } -} - -func TestProvisioner_Provision(t *testing.T) { - ctx := context.TODO() - if err := os.MkdirAll(mountPath, 0755); err != nil { - t.Error("error creating test provisioner directory") - } - - defer os.RemoveAll(mountPath) - - fakeQuoater, err := NewFakeProjectQuota() - if err != nil { - t.Error(err) - } - - nfsserver := newCustomResource(types.NamespacedName{Name: "test-nfsserver", Namespace: "test-nfsserver"}).WithExports("share-1", "ReadWrite", "none", "test-claim").Generate() - - type fields struct { - client kubernetes.Interface - rookClient rookclient.Interface - quoater Quotaer - } - type args struct { - options controller.ProvisionOptions - } - tests := []struct { - name string - fields fields - args args - want *corev1.PersistentVolume - wantErr bool - }{ - { - name: "success create volume", - fields: fields{ - client: k8sclientfake.NewSimpleClientset( - newServiceForNFSServer(nfsserver), - newDummyStorageClass("share-1", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - ), - rookClient: rookclientfake.NewSimpleClientset( - nfsserver, - ), - quoater: fakeQuoater, - }, - args: args{ - options: controller.ProvisionOptions{ - StorageClass: 
newDummyStorageClass("share-1", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - PVName: "share-1-pvc", - PVC: newDummyPVC("share-1-pvc", "default", apiresource.MustParse("1Mi"), "share-1"), - }, - }, - want: newDummyPV("share-1-pvc", "", "/tmp/test-rook-nfs/test-claim/default-share-1-pvc-share-1-pvc", apiresource.MustParse("1Mi"), corev1.PersistentVolumeReclaimDelete), - }, - { - name: "no matching export", - fields: fields{ - client: k8sclientfake.NewSimpleClientset( - newServiceForNFSServer(nfsserver), - newDummyStorageClass("foo", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - ), - rookClient: rookclientfake.NewSimpleClientset( - nfsserver, - ), - }, - args: args{ - options: controller.ProvisionOptions{ - StorageClass: newDummyStorageClass("foo", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - PVName: "share-1-pvc", - PVC: newDummyPVC("share-1-pvc", "default", apiresource.MustParse("1Mi"), "foo"), - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := &Provisioner{ - client: tt.fields.client, - rookClient: tt.fields.rookClient, - quotaer: tt.fields.quoater, - } - got, _, err := p.Provision(ctx, tt.args.options) - if (err != nil) != tt.wantErr { - t.Errorf("Provisioner.Provision() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Provisioner.Provision() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestProvisioner_Delete(t *testing.T) { - ctx := context.TODO() - if err := os.MkdirAll(mountPath, 0755); err != nil { - t.Error("error creating test provisioner directory") - } - - defer os.RemoveAll(mountPath) - - fakeQuoater, err := NewFakeProjectQuota() - if err != nil { - t.Error(err) - } - - nfsserver := newCustomResource(types.NamespacedName{Name: "test-nfsserver", Namespace: "test-nfsserver"}).WithExports("share-1", "ReadWrite", "none", "test-claim").Generate() - type fields struct { - client kubernetes.Interface - rookClient rookclient.Interface - quoater Quotaer - } - type args struct { - volume *corev1.PersistentVolume - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "success delete volume", - fields: fields{ - client: k8sclientfake.NewSimpleClientset( - newServiceForNFSServer(nfsserver), - newDummyStorageClass("share-1", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - ), - rookClient: rookclientfake.NewSimpleClientset( - nfsserver, - ), - quoater: fakeQuoater, - }, - args: args{ - volume: newDummyPV("share-1-pvc", "share-1", "/tmp/test-rook-nfs/test-claim/default-share-1-pvc-share-1-pvc", apiresource.MustParse("1Mi"), corev1.PersistentVolumeReclaimDelete), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := &Provisioner{ - client: tt.fields.client, - rookClient: tt.fields.rookClient, - quotaer: tt.fields.quoater, - } - if err := p.Delete(ctx, tt.args.volume); (err != nil) != tt.wantErr { - t.Errorf("Provisioner.Delete() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/pkg/operator/nfs/quota.go b/pkg/operator/nfs/quota.go deleted file mode 100644 index 9881b4183..000000000 --- a/pkg/operator/nfs/quota.go +++ /dev/null @@ -1,264 +0,0 @@ -package nfs - -import ( - "fmt" - "io/ioutil" - 
"math" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/mount" -) - -type Quotaer interface { - CreateProjectQuota(projectsFile, directory, limit string) (string, error) - RemoveProjectQuota(projectID uint16, projectsFile, block string) error - RestoreProjectQuota() error -} - -type Quota struct { - mutex *sync.Mutex - projectsIDs map[string]map[uint16]bool -} - -func NewProjectQuota() (Quotaer, error) { - projectsIDs := map[string]map[uint16]bool{} - mountEntries, err := findProjectQuotaMount() - if err != nil { - return nil, err - } - - for _, entry := range mountEntries { - exportName := filepath.Base(entry.Mountpoint) - projectsIDs[exportName] = map[uint16]bool{} - projectsFile := filepath.Join(entry.Mountpoint, "projects") - _, err := os.Stat(projectsFile) - if os.IsNotExist(err) { - logger.Infof("creating new project file %s", projectsFile) - file, cerr := os.Create(projectsFile) - if cerr != nil { - return nil, fmt.Errorf("error creating xfs projects file %s: %v", projectsFile, cerr) - } - - if err := file.Close(); err != nil { - return nil, err - } - } else { - logger.Infof("found project file %s, restoring project ids", projectsFile) - re := regexp.MustCompile("(?m:^([0-9]+):/.+$)") - projectIDs, err := restoreProjectIDs(projectsFile, re) - if err != nil { - logger.Errorf("error while populating projectIDs map, there may be errors setting quotas later if projectIDs are reused: %v", err) - } - - projectsIDs[exportName] = projectIDs - } - } - - quota := &Quota{ - mutex: &sync.Mutex{}, - projectsIDs: projectsIDs, - } - - if err := quota.RestoreProjectQuota(); err != nil { - return nil, err - } - - return quota, nil -} - -func findProjectQuotaMount() ([]*mount.Info, error) { - var entries []*mount.Info - allEntries, err := mount.GetMounts() - if err != nil { - return nil, err - } - - for _, entry := range allEntries { - // currently we only support xfs - if entry.Fstype != "xfs" { - continue - } - - if filepath.Dir(entry.Mountpoint) == mountPath && (strings.Contains(entry.VfsOpts, "pquota") || strings.Contains(entry.VfsOpts, "prjquota")) { - entries = append(entries, entry) - } - } - - return entries, nil -} - -func restoreProjectIDs(projectsFile string, re *regexp.Regexp) (map[uint16]bool, error) { - ids := map[uint16]bool{} - digitsRe := "([0-9]+)" - if !strings.Contains(re.String(), digitsRe) { - return ids, fmt.Errorf("regexp %s doesn't contain digits submatch %s", re.String(), digitsRe) - } - - read, err := ioutil.ReadFile(projectsFile) // #nosec - if err != nil { - return ids, err - } - - allMatches := re.FindAllSubmatch(read, -1) - for _, match := range allMatches { - digits := match[1] - if id, err := strconv.ParseUint(string(digits), 10, 16); err == nil { - ids[uint16(id)] = true - } - } - - return ids, nil -} - -func (q *Quota) CreateProjectQuota(projectsFile, directory, limit string) (string, error) { - exportName := filepath.Base(filepath.Dir(projectsFile)) - - q.mutex.Lock() - projectID := uint16(1) - for ; projectID < math.MaxUint16; projectID++ { - if _, ok := q.projectsIDs[exportName][projectID]; !ok { - break - } - } - - q.projectsIDs[exportName][projectID] = true - block := strconv.FormatUint(uint64(projectID), 10) + ":" + directory + ":" + limit + "\n" - file, err := os.OpenFile(projectsFile, os.O_APPEND|os.O_WRONLY, 0600) // #nosec - if err != nil { - q.mutex.Unlock() - return "", err - } - - defer func() { - if err := file.Close(); err != nil { - logger.Errorf("Error closing 
file: %s\n", err) - } - }() - - if _, err = file.WriteString(block); err != nil { - q.mutex.Unlock() - return "", err - } - - if err := file.Sync(); err != nil { - q.mutex.Unlock() - return "", err - } - - logger.Infof("set project to %s for directory %s with limit %s", projectsFile, directory, limit) - if err := q.setProject(projectID, projectsFile, directory); err != nil { - q.mutex.Unlock() - return "", err - } - - logger.Infof("set quota for project id %d with limit %s", projectID, limit) - if err := q.setQuota(projectID, projectsFile, directory, limit); err != nil { - q.mutex.Unlock() - _ = q.removeProject(projectID, projectsFile, block) - } - - q.mutex.Unlock() - return block, nil -} - -func (q *Quota) RemoveProjectQuota(projectID uint16, projectsFile, block string) error { - return q.removeProject(projectID, projectsFile, block) -} - -func (q *Quota) RestoreProjectQuota() error { - mountEntries, err := findProjectQuotaMount() - if err != nil { - return err - } - - for _, entry := range mountEntries { - projectsFile := filepath.Join(entry.Mountpoint, "projects") - if _, err := os.Stat(projectsFile); err != nil { - if os.IsNotExist(err) { - continue - } - - return err - } - read, err := ioutil.ReadFile(projectsFile) // #nosec - if err != nil { - return err - } - - re := regexp.MustCompile("(?m:^([0-9]+):(.+):(.+)$\n)") - matches := re.FindAllSubmatch(read, -1) - for _, match := range matches { - projectID, _ := strconv.ParseUint(string(match[1]), 10, 16) - directory := string(match[2]) - bhard := string(match[3]) - - if _, err := os.Stat(directory); os.IsNotExist(err) { - _ = q.removeProject(uint16(projectID), projectsFile, string(match[0])) - continue - } - - if err := q.setProject(uint16(projectID), projectsFile, directory); err != nil { - return err - } - - logger.Infof("restoring quotas from project file %s for project id %s", string(match[1]), projectsFile) - if err := q.setQuota(uint16(projectID), projectsFile, directory, bhard); err != nil { - return fmt.Errorf("error restoring quota for directory %s: %v", directory, err) - } - } - } - - return nil -} - -func (q *Quota) setProject(projectID uint16, projectsFile, directory string) error { - cmd := exec.Command("xfs_quota", "-x", "-c", fmt.Sprintf("project -s -p %s %s", directory, strconv.FormatUint(uint64(projectID), 10)), filepath.Dir(projectsFile)) // #nosec - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("xfs_quota failed with error: %v, output: %s", err, out) - } - - return nil -} - -func (q *Quota) setQuota(projectID uint16, projectsFile, directory, bhard string) error { - exportName := filepath.Base(filepath.Dir(projectsFile)) - if !q.projectsIDs[exportName][projectID] { - return fmt.Errorf("project with id %v has not been added", projectID) - } - - cmd := exec.Command("xfs_quota", "-x", "-c", fmt.Sprintf("limit -p bhard=%s %s", bhard, strconv.FormatUint(uint64(projectID), 10)), filepath.Dir(projectsFile)) // #nosec - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("xfs_quota failed with error: %v, output: %s", err, out) - } - - return nil -} - -func (q *Quota) removeProject(projectID uint16, projectsFile, block string) error { - exportName := filepath.Base(filepath.Dir(projectsFile)) - q.mutex.Lock() - delete(q.projectsIDs[exportName], projectID) - read, err := ioutil.ReadFile(projectsFile) // #nosec - if err != nil { - q.mutex.Unlock() - return err - } - - removed := strings.Replace(string(read), block, "", -1) - err = ioutil.WriteFile(projectsFile, []byte(removed), 0) - if 
err != nil { - q.mutex.Unlock() - return err - } - - q.mutex.Unlock() - return nil -} diff --git a/pkg/operator/nfs/quota_fake.go b/pkg/operator/nfs/quota_fake.go deleted file mode 100644 index fc9e2ebf9..000000000 --- a/pkg/operator/nfs/quota_fake.go +++ /dev/null @@ -1,19 +0,0 @@ -package nfs - -type FakeQuota struct{} - -func NewFakeProjectQuota() (Quotaer, error) { - return &FakeQuota{}, nil -} - -func (q *FakeQuota) CreateProjectQuota(projectsFile, directory, limit string) (string, error) { - return "", nil -} - -func (q *FakeQuota) RemoveProjectQuota(projectID uint16, projectsFile, block string) error { - return nil -} - -func (q *FakeQuota) RestoreProjectQuota() error { - return nil -} diff --git a/pkg/operator/nfs/server.go b/pkg/operator/nfs/server.go deleted file mode 100644 index a61785c13..000000000 --- a/pkg/operator/nfs/server.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Portion of this file is coming from https://github.com/kubernetes-incubator/external-storage/blob/master/nfs/pkg/server/server.go -package nfs - -import ( - "fmt" - "os/exec" - "syscall" -) - -const ( - ganeshaLog = "/dev/stdout" - ganeshaOptions = "NIV_INFO" -) - -// Setup sets up various prerequisites and settings for the server. 
If an error -// is encountered at any point it returns it instantly -func Setup(ganeshaConfig string) error { - // Start rpcbind if it is not started yet - cmd := exec.Command("rpcinfo", "127.0.0.1") - if err := cmd.Run(); err != nil { - cmd = exec.Command("rpcbind", "-w") - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("Starting rpcbind failed with error: %v, output: %s", err, out) - } - } - - cmd = exec.Command("rpc.statd") - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("rpc.statd failed with error: %v, output: %s", err, out) - } - - // Start dbus, needed for ganesha dynamic exports - cmd = exec.Command("dbus-daemon", "--system") - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("dbus-daemon failed with error: %v, output: %s", err, out) - } - - err := setRlimitNOFILE() - if err != nil { - logger.Warningf("Error setting RLIMIT_NOFILE, there may be \"Too many open files\" errors later: %v", err) - } - return nil -} - -// Run : run the NFS server in the foreground until it exits -// Ideally, it should never exit when run in foreground mode -// We force foreground to allow the provisioner process to restart -// the server if it crashes - daemonization prevents us from using Wait() -// for this purpose -func Run(ganeshaConfig string) error { - // Start ganesha.nfsd - logger.Infof("Running NFS server!") - // #nosec G204 Rook controls the input to the exec arguments - cmd := exec.Command("ganesha.nfsd", "-F", "-L", ganeshaLog, "-f", ganeshaConfig, "-N", ganeshaOptions) - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("ganesha.nfsd failed with error: %v, output: %s", err, out) - } - return nil -} - -func setRlimitNOFILE() error { - var rlimit syscall.Rlimit - err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit) - if err != nil { - return fmt.Errorf("error getting RLIMIT_NOFILE: %v", err) - } - logger.Infof("starting RLIMIT_NOFILE rlimit.Cur %d, rlimit.Max %d", rlimit.Cur, rlimit.Max) - rlimit.Max = 1024 * 1024 - rlimit.Cur = 1024 * 1024 - err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit) - if err != nil { - return err - } - err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit) - if err != nil { - return fmt.Errorf("error getting RLIMIT_NOFILE: %v", err) - } - logger.Infof("ending RLIMIT_NOFILE rlimit.Cur %d, rlimit.Max %d", rlimit.Cur, rlimit.Max) - return nil -} - -// Stop stops the NFS server. -func Stop() { - // /bin/dbus-send --system --dest=org.ganesha.nfsd --type=method_call /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.shutdown -} diff --git a/pkg/operator/nfs/spec.go b/pkg/operator/nfs/spec.go deleted file mode 100644 index 1dc1d97ff..000000000 --- a/pkg/operator/nfs/spec.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nfs - -import ( - "context" - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" -) - -func newLabels(cr *nfsv1alpha1.NFSServer) map[string]string { - return map[string]string{ - "app": cr.Name, - } -} - -func newConfigMapForNFSServer(cr *nfsv1alpha1.NFSServer) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name, - Namespace: cr.Namespace, - Labels: newLabels(cr), - }, - } -} - -func newServiceForNFSServer(cr *nfsv1alpha1.NFSServer) *corev1.Service { - return &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name, - Namespace: cr.Namespace, - Labels: newLabels(cr), - }, - Spec: corev1.ServiceSpec{ - Selector: newLabels(cr), - Type: corev1.ServiceTypeClusterIP, - Ports: []corev1.ServicePort{ - { - Name: "nfs", - Port: int32(nfsPort), - TargetPort: intstr.FromInt(int(nfsPort)), - }, - { - Name: "rpc", - Port: int32(rpcPort), - TargetPort: intstr.FromInt(int(rpcPort)), - }, - }, - }, - } -} - -func newStatefulSetForNFSServer(cr *nfsv1alpha1.NFSServer, clientset kubernetes.Interface, ctx context.Context) (*appsv1.StatefulSet, error) { - pod, err := k8sutil.GetRunningPod(clientset) - if err != nil { - return nil, err - } - image, err := k8sutil.GetContainerImage(pod, "") - if err != nil { - return nil, err - } - - privileged := true - replicas := int32(cr.Spec.Replicas) - return &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name, - Namespace: cr.Namespace, - Labels: newLabels(cr), - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - ServiceName: cr.Name, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name, - Namespace: cr.Namespace, - Labels: newLabels(cr), - }, - Spec: corev1.PodSpec{ - ServiceAccountName: "rook-nfs-server", - Containers: []corev1.Container{ - { - Name: "nfs-server", - Image: image, - Args: []string{"nfs", "server", "--ganeshaConfigPath=" + nfsConfigMapPath + "/" + cr.Name}, - Ports: []corev1.ContainerPort{ - { - Name: "nfs-port", - ContainerPort: int32(nfsPort), - }, - { - Name: "rpc-port", - ContainerPort: int32(rpcPort), - }, - }, - SecurityContext: &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{ - "SYS_ADMIN", - "DAC_READ_SEARCH", - }, - }, - }, - }, - { - Name: "nfs-provisioner", - Image: image, - Args: []string{"nfs", "provisioner", "--provisioner=" + "nfs.rook.io/" + cr.Name + "-provisioner"}, - TerminationMessagePath: "/dev/termination-log", - TerminationMessagePolicy: corev1.TerminationMessageReadFile, - SecurityContext: &corev1.SecurityContext{ - Privileged: &privileged, - }, - }, - }, - }, - }, - }, - }, nil -} diff --git a/pkg/operator/nfs/webhook.go b/pkg/operator/nfs/webhook.go deleted file mode 100644 index cc399e822..000000000 --- a/pkg/operator/nfs/webhook.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - - ctrl "sigs.k8s.io/controller-runtime" -) - -type Webhook struct { - Port int - CertDir string -} - -func NewWebhook(port int, certDir string) *Webhook { - return &Webhook{ - Port: port, - CertDir: certDir, - } -} - -func (w *Webhook) Run() error { - opts := ctrl.Options{ - Port: w.Port, - Scheme: scheme, - } - - if w.CertDir != "" { - opts.CertDir = w.CertDir - } - - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opts) - if err != nil { - return err - } - - if err := ctrl.NewWebhookManagedBy(mgr). - For(&nfsv1alpha1.NFSServer{}). - Complete(); err != nil { - return err - } - - logger.Info("starting webhook manager") - return mgr.Start(ctrl.SetupSignalHandler()) -} diff --git a/pkg/operator/test/spec.go b/pkg/operator/test/spec.go index 956e19c4b..06b5637fb 100644 --- a/pkg/operator/test/spec.go +++ b/pkg/operator/test/spec.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/assert" ) -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "ceph-op-testlib") +var logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "ceph-op-testlib") // ArgumentsMatchExpected returns a descriptive error if any of the expected arguments do not exist. // This supports arguments in which flags appear multiple times with different values but does not diff --git a/pkg/util/dependents/dependents.go b/pkg/util/dependents/dependents.go deleted file mode 100644 index 8f4ec6a4b..000000000 --- a/pkg/util/dependents/dependents.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dependents - -import ( - "fmt" - "sort" - "strings" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - corev1 "k8s.io/api/core/v1" -) - -func DeletionBlockedDueToDependentsCondition(blocked bool, message string) cephv1.Condition { - status := corev1.ConditionFalse - reason := cephv1.ObjectHasNoDependentsReason - if blocked { - status = corev1.ConditionTrue - reason = cephv1.ObjectHasDependentsReason - } - return cephv1.Condition{ - Type: cephv1.ConditionDeletionIsBlocked, - Status: status, - Reason: reason, - Message: message, - } -} - -// A DependentList represents a list of dependents of a resource. Each dependent has a plural Kind -// and a list of names of dependent resources. -type DependentList struct { - d map[string][]string // map from dependent Resource to a list of dependent names -} - -// NewDependentList creates a new empty DependentList. 
-func NewDependentList() *DependentList { - return &DependentList{ - d: make(map[string][]string), - } -} - -// Empty returns true if the DependentList is empty or false otherwise. -func (d *DependentList) Empty() bool { - return len(d.d) == 0 -} - -// Add adds a dependent name for a plural Kind to the DependentList. -func (d *DependentList) Add(pluralKind string, name string) { - names, ok := d.d[pluralKind] - if !ok { - d.d[pluralKind] = []string{name} - return - } - d.d[pluralKind] = append(names, name) -} - -// PluralKinds returns the plural Kinds that have dependents. -func (d *DependentList) PluralKinds() []string { - kinds := []string{} - for k := range d.d { - kinds = append(kinds, k) - } - return kinds -} - -// OfPluralKind returns the names of dependents of the Kind (plural), or an empty list if no -// dependents exist. -func (d *DependentList) OfPluralKind(pluralKind string) []string { - names, ok := d.d[pluralKind] - if !ok { - return []string{} - } - return names -} - -// StringWithHeader outputs the dependent list as a pretty-printed string headed with the given -// formatting directive (followed by a colon). It outputs dependents in alphabetical order by the -// plural Kind. -// Example: -// StringWithHeader("dependents of my %q", "mom") --> -// `dependents of my "mom": FirstResources: [name1], SecondResources: [name2 name2 name3]` -func (d *DependentList) StringWithHeader(headerFormat string, args ...interface{}) string { - header := fmt.Sprintf(headerFormat, args...) - if len(d.d) == 0 { - return fmt.Sprintf("%s: none", header) - } - deps := make([]string, 0, len(d.d)) - for pluralKind, names := range d.d { - deps = append(deps, fmt.Sprintf("%s: %v", pluralKind, names)) - } - sort.Strings(deps) // always output a consistent ordering - allDeps := strings.Join(deps, ", ") - return fmt.Sprintf("%s: %s", header, allDeps) -} diff --git a/pkg/util/dependents/dependents_test.go b/pkg/util/dependents/dependents_test.go deleted file mode 100644 index 325f98a92..000000000 --- a/pkg/util/dependents/dependents_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
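Editor's note: the dependents_test.go hunk below exercises the removed DependentList helper. As a quick illustration of why its output is stable, here is a minimal standalone sketch (stdlib only, hypothetical names, not part of any Rook package) of the same formatting logic StringWithHeader used: the per-Kind strings are built first and then sorted, so plural Kinds always appear alphabetically.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// stringWithHeader mimics how the removed DependentList.StringWithHeader
// assembled its message: one "Kind: [names]" entry per plural Kind,
// sorted so output ordering is deterministic.
func stringWithHeader(header string, deps map[string][]string) string {
	if len(deps) == 0 {
		return fmt.Sprintf("%s: none", header)
	}
	parts := make([]string, 0, len(deps))
	for pluralKind, names := range deps {
		parts = append(parts, fmt.Sprintf("%s: %v", pluralKind, names))
	}
	sort.Strings(parts) // alphabetical by plural Kind, as in the removed helper
	return fmt.Sprintf("%s: %s", header, strings.Join(parts, ", "))
}

func main() {
	deps := map[string][]string{
		"YourResources":  {"your-resource-1"},
		"MyResources":    {"my-resource-2", "my-resource-4"},
		"TheirResources": {"their-resource-5"},
	}
	// MyResources sorts before TheirResources, which sorts before YourResources.
	fmt.Println(stringWithHeader(`dependents of my "mom"`, deps))
}
```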
-*/ - -package dependents - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDependentList(t *testing.T) { - containsExactlyOne := func(s, substr string) { - assert.Equal(t, 1, strings.Count(s, substr)) - } - isBefore := func(s, before, after string) { - assert.Less(t, strings.Index(s, before), strings.Index(s, after)) - } - - t.Run("empty", func(t *testing.T) { - d := NewDependentList() - assert.True(t, d.Empty()) - }) - - t.Run("one resource, one dependent", func(t *testing.T) { - d := NewDependentList() - d.Add("MyResources", "my-resource-1") - assert.False(t, d.Empty()) - assert.ElementsMatch(t, []string{"MyResources"}, d.PluralKinds()) - assert.ElementsMatch(t, []string{"my-resource-1"}, d.OfPluralKind("MyResources")) - toString := d.StringWithHeader("header") - containsExactlyOne(toString, "header:") - containsExactlyOne(toString, "MyResources") - containsExactlyOne(toString, "my-resource-1") - }) - - t.Run("one resource - multiple dependents", func(t *testing.T) { - d := NewDependentList() - d.Add("MyResources", "my-resource-1") - d.Add("MyResources", "my-resource-2") - d.Add("MyResources", "my-resource-3") - assert.False(t, d.Empty()) - assert.ElementsMatch(t, []string{"MyResources"}, d.PluralKinds()) - assert.ElementsMatch(t, []string{"my-resource-1", "my-resource-2", "my-resource-3"}, d.OfPluralKind("MyResources")) - assert.ElementsMatch(t, []string{}, d.OfPluralKind("OtherKinds")) - toString := d.StringWithHeader("head with arg %d", 1) - containsExactlyOne(toString, "head with arg 1:") - containsExactlyOne(toString, "MyResources") - containsExactlyOne(toString, "my-resource-1") - containsExactlyOne(toString, "my-resource-2") - containsExactlyOne(toString, "my-resource-3") - - }) - - t.Run("multiple resources - multiple dependents", func(t *testing.T) { - d := NewDependentList() - d.Add("MyResources", "my-resource-2") - d.Add("MyResources", "my-resource-4") - d.Add("YourResources", "your-resource-1") - d.Add("TheirResources", "their-resource-5") - d.Add("TheirResources", "their-resource-6") - assert.False(t, d.Empty()) - assert.ElementsMatch(t, []string{"MyResources", "YourResources", "TheirResources"}, d.PluralKinds()) - assert.ElementsMatch(t, []string{"my-resource-2", "my-resource-4"}, d.OfPluralKind("MyResources")) - assert.ElementsMatch(t, []string{"your-resource-1"}, d.OfPluralKind("YourResources")) - assert.ElementsMatch(t, []string{"their-resource-5", "their-resource-6"}, d.OfPluralKind("TheirResources")) - assert.ElementsMatch(t, []string{}, d.OfPluralKind("OtherKinds")) - toString := d.StringWithHeader("head with arg %s", "mom") - t.Log(toString) - containsExactlyOne(toString, "head with arg mom:") - containsExactlyOne(toString, "MyResources") - containsExactlyOne(toString, "my-resource-2") - containsExactlyOne(toString, "my-resource-4") - containsExactlyOne(toString, "YourResources") - containsExactlyOne(toString, "your-resource-1") - containsExactlyOne(toString, "TheirResources") - containsExactlyOne(toString, "their-resource-5") - containsExactlyOne(toString, "their-resource-6") - // ensure alphabetical ordering - isBefore(toString, "MyResources", "TheirResources") - isBefore(toString, "TheirResources", "YourResources") - }) -} diff --git a/pkg/util/exec/exec.go b/pkg/util/exec/exec.go index cd11f481f..d2e26d67b 100644 --- a/pkg/util/exec/exec.go +++ b/pkg/util/exec/exec.go @@ -276,9 +276,9 @@ func logOutput(stdout, stderr io.ReadCloser) { // The child processes should appropriately be outputting at the desired global level. 
Therefore, // we always log at INFO level here, so that log statements from child procs at higher levels // (e.g., WARNING) will still be displayed. We are relying on the child procs to output appropriately. - childLogger := capnslog.NewPackageLogger("github.com/rook/rook", "exec") + childLogger := capnslog.NewPackageLogger("github.com/rook/cassandra", "exec") if !childLogger.LevelAt(capnslog.INFO) { - rl, err := capnslog.GetRepoLogger("github.com/rook/rook") + rl, err := capnslog.GetRepoLogger("github.com/rook/cassandra") if err == nil { rl.SetLogLevel(map[string]capnslog.LogLevel{"exec": capnslog.INFO}) } diff --git a/pkg/util/exec/log.go b/pkg/util/exec/log.go index 5bd1aacc9..16e846236 100644 --- a/pkg/util/exec/log.go +++ b/pkg/util/exec/log.go @@ -17,4 +17,4 @@ package exec import "github.com/coreos/pkg/capnslog" -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "exec") +var logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "exec") diff --git a/pkg/util/file.go b/pkg/util/file.go index 3e034b278..00ce12a59 100644 --- a/pkg/util/file.go +++ b/pkg/util/file.go @@ -27,7 +27,7 @@ import ( "github.com/coreos/pkg/capnslog" ) -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "util") +var logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "util") func WriteFile(filePath string, contentBuffer bytes.Buffer) error { dir := filepath.Dir(filePath) diff --git a/pkg/util/flags/flags.go b/pkg/util/flags/flags.go index 2dbcc8463..32c525d56 100644 --- a/pkg/util/flags/flags.go +++ b/pkg/util/flags/flags.go @@ -28,7 +28,7 @@ import ( ) var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-flags") + logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "op-flags") ) func VerifyRequiredFlags(cmd *cobra.Command, requiredFlags []string) error { diff --git a/pkg/util/sys/device.go b/pkg/util/sys/device.go index 727bf6187..9e879234d 100644 --- a/pkg/util/sys/device.go +++ b/pkg/util/sys/device.go @@ -25,7 +25,7 @@ import ( "strings" "github.com/google/uuid" - "github.com/rook/rook/pkg/util/exec" + "github.com/rook/cassandra/pkg/util/exec" ) const ( diff --git a/pkg/util/sys/device_test.go b/pkg/util/sys/device_test.go index 10342f857..49636d31a 100644 --- a/pkg/util/sys/device_test.go +++ b/pkg/util/sys/device_test.go @@ -19,7 +19,7 @@ import ( "fmt" "testing" - exectest "github.com/rook/rook/pkg/util/exec/test" + exectest "github.com/rook/cassandra/pkg/util/exec/test" "github.com/stretchr/testify/assert" ) diff --git a/pkg/util/sys/kmod.go b/pkg/util/sys/kmod.go index 7acd72b1a..b6805d0aa 100644 --- a/pkg/util/sys/kmod.go +++ b/pkg/util/sys/kmod.go @@ -20,7 +20,7 @@ import ( "os/exec" "strings" - pkgexec "github.com/rook/rook/pkg/util/exec" + pkgexec "github.com/rook/cassandra/pkg/util/exec" ) func getKernelVersion() (string, error) { diff --git a/pkg/util/sys/log.go b/pkg/util/sys/log.go index fd4beef89..e0d5dd3b8 100644 --- a/pkg/util/sys/log.go +++ b/pkg/util/sys/log.go @@ -17,4 +17,4 @@ package sys import "github.com/coreos/pkg/capnslog" -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "sys") +var logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "sys") diff --git a/tests/README.md b/tests/README.md index 78630c9f2..a041335c1 100644 --- a/tests/README.md +++ b/tests/README.md @@ -76,17 +76,6 @@ These two scripts can install any version of Kubernetes you wish based on the `K To use an alternate version, simply set this variable before running the relevant `up` command from above. 
For example, if you wanted to use `v1.12.5`, you would run `export KUBE_VERSION=v1.12.5` first before running `up`. -### Install Helm -Use [helm.sh](/tests/scripts/helm.sh) to install Helm and set up Rook charts defined under `_output/charts` (generated by build): - -- To install and set up Helm charts for Rook run `tests/scripts/helm.sh up`. -- To clean up `tests/scripts/helm.sh clean`. - -**NOTE:** `*kubeadm.sh`, `minikube.sh` and `helm.sh` scripts depend on some artifacts under the `_output/` directory generated during build time, -these scripts should be run from project root. e.g., `tests/script/kubeadm.sh up`. - -**NOTE**: If Helm is not available in your `PATH`, Helm will be downloaded to a temporary directory (`/tmp/rook-tests-scripts-helm`) and used from that directory. - ## Run Tests From the project root do the following: @@ -116,7 +105,6 @@ See [environment.go](/tests/framework/installer/environment.go) for the availabl At least you should set the following variables. ```console -export TEST_HELM_PATH=/tmp/rook-tests-scripts-helm/linux-amd64/helm export TEST_BASE_DIR=WORKING_DIR export TEST_SCRATCH_DEVICE= # for example, TEST_SCRATCH_DEVICE=/dev/sdb ``` @@ -125,42 +113,7 @@ Please note that the integration test erases the contents of TEST_SCRATCH_DEVICE To run all integration tests: ```console -go test -v -timeout 7200s github.com/rook/rook/tests/integration +go test -v -timeout 7200s github.com/rook/cassandra/tests/integration ``` After running tests, you can get test logs under "tests/integration/_output". - -In addition, you can choose to test only one storage provider. For example, you can run Ceph tests as follows. - -```console -export STORAGE_PROVIDER_TESTS=ceph -go test -v -timeout 7200s github.com/rook/rook/tests/integration -``` - -To run a specific suite (uses regex): -```console -go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration -``` - -To run specific tests inside a suite: -```console -go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration -testify.m TestARookClusterInstallation_SmokeTest -``` - -### To run tests on OpenShift environment - -- Setup OpenShift environment and export KUBECONFIG before executing the tests. -- Make sure `oc` executable file is in the PATH. -- Only `CephSmokeSuite` is currently supported on OpenShift. -- Set few environment variables: -``` -export TEST_ENV_NAME=openshift -export TEST_STORAGE_CLASS=gp2 -export TEST_BASE_DIR=/tmp -export RETRY_MAX=40 -``` - -To run a `CephSmokeSuite` (uses regex): -```console -go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration -``` diff --git a/tests/framework/clients/block.go b/tests/framework/clients/block.go deleted file mode 100644 index c7f8d5cad..000000000 --- a/tests/framework/clients/block.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clients - -import ( - "context" - "fmt" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// BlockOperation is wrapper for k8s rook block operations -type BlockOperation struct { - k8sClient *utils.K8sHelper - manifests installer.CephManifests -} - -type BlockImage struct { - Name string `json:"imageName"` - PoolName string `json:"poolName"` - Size uint64 `json:"size"` - Device string `json:"device"` - MountPoint string `json:"mountPoint"` -} - -// CreateBlockOperation - Constructor to create BlockOperation - client to perform rook Block operations on k8s -func CreateBlockOperation(k8shelp *utils.K8sHelper, manifests installer.CephManifests) *BlockOperation { - return &BlockOperation{k8shelp, manifests} -} - -// BlockCreate Function to create a Block using Rook -// Input parameters - -// manifest - pod definition that creates a pvc in k8s - yaml should describe name and size of pvc being created -// size - not user for k8s implementation since its descried on the pvc yaml definition -// Output - k8s create pvc operation output and/or error -func (b *BlockOperation) Create(manifest string, size int) (string, error) { - args := []string{"apply", "-f", "-"} - result, err := b.k8sClient.KubectlWithStdin(manifest, args...) - if err != nil { - return "", fmt.Errorf("Unable to create block -- : %s", err) - - } - return result, nil - -} - -func (b *BlockOperation) CreateStorageClassAndPVC(pvcNamespace, poolName, storageClassName, reclaimPolicy, blockName, mode string) error { - if err := b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockPool(poolName, "1")); err != nil { - return err - } - if err := b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockStorageClass(poolName, storageClassName, reclaimPolicy)); err != nil { - return err - } - return b.k8sClient.ResourceOperation("apply", installer.GetPVC(blockName, pvcNamespace, storageClassName, mode, "1M")) -} - -func (b *BlockOperation) CreatePVC(namespace, claimName, storageClassName, mode, size string) error { - return b.k8sClient.ResourceOperation("apply", installer.GetPVC(claimName, namespace, storageClassName, mode, size)) -} - -func (b *BlockOperation) CreatePod(podName, claimName, namespace, mountPoint string, readOnly bool) error { - return b.k8sClient.ResourceOperation("apply", installer.GetPodWithVolume(podName, claimName, namespace, mountPoint, readOnly)) -} - -func (b *BlockOperation) CreateStorageClass(csi bool, poolName, storageClassName, reclaimPolicy, namespace string) error { - return b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockStorageClass(poolName, storageClassName, reclaimPolicy)) -} - -func (b *BlockOperation) DeletePVC(namespace, claimName string) error { - ctx := context.TODO() - logger.Infof("deleting pvc %q from namespace %q", claimName, namespace) - return b.k8sClient.Clientset.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, claimName, metav1.DeleteOptions{}) -} - -func (b *BlockOperation) CreatePVCRestore(namespace, claimName, snapshotName, storageClassName, mode, size string) error { - return b.k8sClient.ResourceOperation("apply", installer.GetPVCRestore(claimName, snapshotName, namespace, storageClassName, mode, size)) -} - -func (b *BlockOperation) CreatePVCClone(namespace, cloneClaimName, parentClaimName, storageClassName, mode, size string) error { - return 
b.k8sClient.ResourceOperation("apply", installer.GetPVCClone(cloneClaimName, parentClaimName, namespace, storageClassName, mode, size)) -} - -func (b *BlockOperation) CreateSnapshotClass(snapshotClassName, deletePolicy, namespace string) error { - return b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockSnapshotClass(snapshotClassName, deletePolicy)) -} - -func (b *BlockOperation) DeleteSnapshotClass(snapshotClassName, deletePolicy, namespace string) error { - return b.k8sClient.ResourceOperation("delete", b.manifests.GetBlockSnapshotClass(snapshotClassName, deletePolicy)) -} - -func (b *BlockOperation) CreateSnapshot(snapshotName, claimName, snapshotClassName, namespace string) error { - return b.k8sClient.ResourceOperation("apply", installer.GetSnapshot(snapshotName, claimName, snapshotClassName, namespace)) -} - -func (b *BlockOperation) DeleteSnapshot(snapshotName, claimName, snapshotClassName, namespace string) error { - return b.k8sClient.ResourceOperation("delete", installer.GetSnapshot(snapshotName, claimName, snapshotClassName, namespace)) -} - -func (b *BlockOperation) DeleteStorageClass(storageClassName string) error { - ctx := context.TODO() - logger.Infof("deleting storage class %q", storageClassName) - err := b.k8sClient.Clientset.StorageV1().StorageClasses().Delete(ctx, storageClassName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("failed to delete storage class %q. %v", storageClassName, err) - } - - return nil -} - -// BlockDelete Function to delete a Block using Rook -// Input parameters - -// manifest - pod definition where pvc is described - delete is run on the the yaml definition -// Output - k8s delete pvc operation output and/or error -func (b *BlockOperation) DeleteBlock(manifest string) (string, error) { - args := []string{"delete", "-f", "-"} - result, err := b.k8sClient.KubectlWithStdin(manifest, args...) - if err != nil { - return "", fmt.Errorf("Unable to delete block -- : %s", err) - - } - return result, nil - -} - -// List Function to list all the block images in all pools -func (b *BlockOperation) ListAllImages(clusterInfo *client.ClusterInfo) ([]BlockImage, error) { - // first list all the pools so that we can retrieve images from all pools - pools, err := client.ListPoolSummaries(b.k8sClient.MakeContext(), clusterInfo) - if err != nil { - return nil, fmt.Errorf("failed to list pools: %+v", err) - } - - // for each pool, get further details about all the images in the pool - images := []BlockImage{} - for _, p := range pools { - cephImages, err := b.ListImagesInPool(clusterInfo, p.Name) - if err != nil { - return nil, fmt.Errorf("failed to get images from pool %s: %+v", p.Name, err) - } - images = append(images, cephImages...) 
- } - return images, nil -} - -// List Function to list all the block images in a pool -func (b *BlockOperation) ListImagesInPool(clusterInfo *client.ClusterInfo, poolName string) ([]BlockImage, error) { - // for each pool, get further details about all the images in the pool - images := []BlockImage{} - cephImages, err := client.ListImages(b.k8sClient.MakeContext(), clusterInfo, poolName) - if err != nil { - return nil, fmt.Errorf("failed to get images from pool %s: %+v", poolName, err) - } - - for _, image := range cephImages { - // add the current image's details to the result set - newImage := BlockImage{ - Name: image.Name, - PoolName: poolName, - Size: image.Size, - } - images = append(images, newImage) - } - - return images, nil -} - -// DeleteBlockImage Function to list all the blocks created/being managed by rook -func (b *BlockOperation) DeleteBlockImage(clusterInfo *client.ClusterInfo, image BlockImage) error { - context := b.k8sClient.MakeContext() - return client.DeleteImage(context, clusterInfo, image.Name, image.PoolName) -} - -// CreateClientPod starts a pod that should have a block PVC. -func (b *BlockOperation) CreateClientPod(manifest string) error { - return b.k8sClient.ResourceOperation("apply", manifest) -} diff --git a/tests/framework/clients/bucket.go b/tests/framework/clients/bucket.go deleted file mode 100644 index 8f3edeb86..000000000 --- a/tests/framework/clients/bucket.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clients - -import ( - b64 "encoding/base64" - "fmt" - - bktv1alpha1 "github.com/kube-object-storage/lib-bucket-provisioner/pkg/apis/objectbucket.io/v1alpha1" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" -) - -// BucketOperation is a wrapper for rook bucket operations -type BucketOperation struct { - k8sh *utils.K8sHelper - manifests installer.CephManifests -} - -// CreateBucketOperation creates a new bucket client -func CreateBucketOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *BucketOperation { - return &BucketOperation{k8sh, manifests} -} - -func (b *BucketOperation) CreateBucketStorageClass(namespace string, storeName string, storageClassName string, reclaimPolicy string, region string) error { - return b.k8sh.ResourceOperation("create", b.manifests.GetBucketStorageClass(storeName, storageClassName, reclaimPolicy, region)) -} - -func (b *BucketOperation) DeleteBucketStorageClass(namespace string, storeName string, storageClassName string, reclaimPolicy string, region string) error { - err := b.k8sh.ResourceOperation("delete", b.manifests.GetBucketStorageClass(storeName, storageClassName, reclaimPolicy, region)) - return err -} - -func (b *BucketOperation) CreateObc(obcName string, storageClassName string, bucketName string, maxObject string, createBucket bool) error { - return b.k8sh.ResourceOperation("create", b.manifests.GetOBC(obcName, storageClassName, bucketName, maxObject, createBucket)) -} - -func (b *BucketOperation) DeleteObc(obcName string, storageClassName string, bucketName string, maxObject string, createBucket bool) error { - return b.k8sh.ResourceOperation("delete", b.manifests.GetOBC(obcName, storageClassName, bucketName, maxObject, createBucket)) -} - -func (b *BucketOperation) UpdateObc(obcName string, storageClassName string, bucketName string, maxObject string, createBucket bool) error { - return b.k8sh.ResourceOperation("apply", b.manifests.GetOBC(obcName, storageClassName, bucketName, maxObject, createBucket)) -} - -// CheckOBC, returns true if the obc, secret and configmap are all in the "check" state, -// and returns false if any of these resources are not in the "check" state. -// Check state values: -// "created", all must exist, -// "bound", all must exist and OBC in Bound phase -// "deleted", all must be missing. 
-func (b *BucketOperation) CheckOBC(obcName, check string) bool { - resources := []string{"obc", "secret", "configmap"} - shouldBeBound := (check == "bound") - shouldExist := (shouldBeBound || check == "created") // bound implies created - - for _, res := range resources { - _, err := b.k8sh.GetResource(res, obcName) - // note: we assume a `GetResource` error is a missing resource - if shouldExist == (err != nil) { - return false - } - logger.Infof("%s %s %s", res, obcName, check) - } - logger.Infof("%s resources %v all %s", obcName, resources, check) - - if shouldBeBound { - // OBC should be in bound phase as well as existing - state, _ := b.k8sh.GetResource("obc", obcName, "--output", "jsonpath={.status.phase}") - boundPhase := bktv1alpha1.ObjectBucketClaimStatusPhaseBound // i.e., "Bound" - if state != boundPhase { - logger.Infof(`resources exist, but OBC is not in %q phase: %q`, boundPhase, state) - return false - } - - // Regression test: OBC should have spec.objectBucketName set - obName, _ := b.k8sh.GetResource("obc", obcName, "--output", "jsonpath={.spec.objectBucketName}") - if obName == "" { - logger.Error("failed regression: OBC spec.objectBucketName is not set") - return false - } - // Regression test: OB should have claim ref to OBC - refName, _ := b.k8sh.GetResource("ob", obName, "--output", "jsonpath={.spec.claimRef.name}") - if refName != obcName { - logger.Errorf("failed regression: OB spec.claimRef.name (%q) does not match expected OBC name (%q)", refName, obcName) - return false - } - - logger.Infof("OBC is %q", boundPhase) - } - - return true -} - -// Fetch SecretKey, AccessKey for s3 client. -func (b *BucketOperation) GetAccessKey(obcName string) (string, error) { - args := []string{"get", "secret", obcName, "-o", "jsonpath={@.data.AWS_ACCESS_KEY_ID}"} - AccessKey, err := b.k8sh.Kubectl(args...) - if err != nil { - return "", fmt.Errorf("Unable to find access key -- %s", err) - } - decode, _ := b64.StdEncoding.DecodeString(AccessKey) - return string(decode), nil -} - -func (b *BucketOperation) GetSecretKey(obcName string) (string, error) { - args := []string{"get", "secret", obcName, "-o", "jsonpath={@.data.AWS_SECRET_ACCESS_KEY}"} - SecretKey, err := b.k8sh.Kubectl(args...) - if err != nil { - return "", fmt.Errorf("Unable to find secret key-- %s", err) - } - decode, _ := b64.StdEncoding.DecodeString(SecretKey) - return string(decode), nil - -} - -// Checks whether MaxObject is updated for ob -func (b *BucketOperation) CheckOBMaxObject(obcName, maxobject string) bool { - obName, _ := b.k8sh.GetResource("obc", obcName, "--output", "jsonpath={.spec.objectBucketName}") - fetchMaxObject, _ := b.k8sh.GetResource("ob", obName, "--output", "jsonpath={.spec.endpoint.additionalConfig.maxObjects}") - return maxobject == fetchMaxObject -} diff --git a/tests/framework/clients/client.go b/tests/framework/clients/client.go deleted file mode 100644 index 2b3834b23..000000000 --- a/tests/framework/clients/client.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
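Editor's note: the removed GetAccessKey/GetSecretKey helpers above read the OBC's generated Secret fields (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) via kubectl jsonpath and base64-decode them, discarding the decode error. A minimal standalone sketch of that decoding step with the error handled (illustrative only, not part of the test framework; the key value is the AWS documentation example, not a real credential):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"
)

// decodeSecretField decodes one base64-encoded value of the kind returned by
// `kubectl get secret <obc> -o jsonpath={@.data.AWS_ACCESS_KEY_ID}`.
// Checking the error catches truncated or otherwise mangled kubectl output early.
func decodeSecretField(encoded string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return "", fmt.Errorf("failed to base64-decode secret field: %w", err)
	}
	return string(raw), nil
}

func main() {
	accessKey, err := decodeSecretField("QUtJQUlPU0ZPRE5ON0VYQU1QTEU=") // example value only
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(accessKey)
}
```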
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clients - -import ( - "context" - "fmt" - "time" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ClientOperation is a wrapper for k8s rook file operations -type ClientOperation struct { - k8sh *utils.K8sHelper - manifests installer.CephManifests -} - -// CreateClientOperation Constructor to create ClientOperation - client to perform rook file system operations on k8s -func CreateClientOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *ClientOperation { - return &ClientOperation{k8sh, manifests} -} - -// Create creates a client in Rook -func (c *ClientOperation) Create(name, namespace string, caps map[string]string) error { - logger.Infof("creating the client via CRD") - if err := c.k8sh.ResourceOperation("apply", c.manifests.GetClient(name, caps)); err != nil { - return err - } - return nil -} - -// Delete deletes a client in Rook -func (c *ClientOperation) Delete(name, namespace string) error { - ctx := context.TODO() - options := &metav1.DeleteOptions{} - logger.Infof("Deleting filesystem %s in namespace %s", name, namespace) - err := c.k8sh.RookClientset.CephV1().CephClients(namespace).Delete(ctx, name, *options) - if err != nil && !errors.IsNotFound(err) { - return err - } - - logger.Infof("Deleted client %s in namespace %s", name, namespace) - return nil -} - -// Get shows user created in Rook -func (c *ClientOperation) Get(clusterInfo *client.ClusterInfo, clientName string) (key string, error error) { - context := c.k8sh.MakeContext() - key, err := client.AuthGetKey(context, clusterInfo, clientName) - if err != nil { - return "", fmt.Errorf("failed to get client %s: %+v", clientName, err) - } - return key, nil -} - -// Update updates provided user capabilities -func (c *ClientOperation) Update(clusterInfo *client.ClusterInfo, clientName string, caps map[string]string) (updatedcaps map[string]string, error error) { - context := c.k8sh.MakeContext() - logger.Infof("updating the client via CRD") - if err := c.k8sh.ResourceOperation("apply", c.manifests.GetClient(clientName, caps)); err != nil { - return nil, err - } - - for i := 0; i < 30; i++ { - updatedcaps, _ = client.AuthGetCaps(context, clusterInfo, "client."+clientName) - if caps["mon"] == updatedcaps["mon"] { - logger.Infof("Finished updating the client via CRD") - return updatedcaps, nil - } - logger.Info("Waiting for client CRD to finish updating caps") - time.Sleep(2 * time.Second) - } - - return nil, fmt.Errorf("Unable to update client") -} diff --git a/tests/framework/clients/cluster.go b/tests/framework/clients/cluster.go deleted file mode 100644 index b812ad031..000000000 --- a/tests/framework/clients/cluster.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2017 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clients - -import ( - "fmt" - - "github.com/rook/rook/pkg/daemon/ceph/client" -) - -// IsClusterHealthy determines if the Rook cluster is currently healthy or not. -func IsClusterHealthy(testClient *TestClient, namespace string) (bool, error) { - - status, err := testClient.Status(namespace) - if err != nil { - return false, err - } - logger.Infof("cluster status: %+v", status) - - // verify all mons are in quorum - if len(status.Quorum) == 0 { - return false, fmt.Errorf("too few monitors: %+v", status) - } - for _, mon := range status.MonMap.Mons { - if !monInQuorum(mon, status.Quorum) { - return false, fmt.Errorf("mon %s not in quorum: %v", mon.Name, status.Quorum) - } - } - - // verify there are OSDs and they are all up/in - totalOSDs := status.OsdMap.OsdMap.NumOsd - if totalOSDs == 0 { - return false, fmt.Errorf("no OSDs: %+v", status) - } - if status.OsdMap.OsdMap.NumInOsd != totalOSDs || status.OsdMap.OsdMap.NumUpOsd != totalOSDs { - return false, fmt.Errorf("not all OSDs are up/in: %+v", status) - } - - // verify MGRs are available - if !status.MgrMap.Available { - return false, fmt.Errorf("MGRs are not available: %+v", status) - } - - // verify that all PGs are in the active+clean state (0 PGs is OK because that means no pools - // have been created yet) - if status.PgMap.NumPgs > 0 { - activeCleanCount := 0 - for _, pg := range status.PgMap.PgsByState { - if pg.StateName == "active+clean" { - activeCleanCount = pg.Count - break - } - } - if activeCleanCount != status.PgMap.NumPgs { - return false, fmt.Errorf("not all PGs are active+clean: %+v", status.PgMap) - } - } - - // cluster passed all the basic health checks, seems healthy - return true, nil -} - -func monInQuorum(mon client.MonMapEntry, quorum []int) bool { - for _, entry := range quorum { - if entry == mon.Rank { - return true - } - } - return false -} diff --git a/tests/framework/clients/filesystem.go b/tests/framework/clients/filesystem.go deleted file mode 100644 index 049e06fdc..000000000 --- a/tests/framework/clients/filesystem.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
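Editor's note: cluster.go above gates "healthy" on every monitor's rank appearing in the reported quorum set. A minimal standalone sketch of that membership check, using simplified types rather than the real client.MonMapEntry:

```go
package main

import "fmt"

// mon mirrors only the fields the removed monInQuorum helper looked at.
type mon struct {
	Name string
	Rank int
}

// monsNotInQuorum returns the names of monitors whose rank is missing from
// the quorum slice, i.e. the condition that made IsClusterHealthy fail.
func monsNotInQuorum(mons []mon, quorum []int) []string {
	inQuorum := make(map[int]bool, len(quorum))
	for _, rank := range quorum {
		inQuorum[rank] = true
	}
	var missing []string
	for _, m := range mons {
		if !inQuorum[m.Rank] {
			missing = append(missing, m.Name)
		}
	}
	return missing
}

func main() {
	mons := []mon{{"a", 0}, {"b", 1}, {"c", 2}}
	fmt.Println(monsNotInQuorum(mons, []int{0, 2})) // [b]
}
```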
-*/ - -package clients - -import ( - "context" - "fmt" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// FilesystemOperation is a wrapper for k8s rook file operations -type FilesystemOperation struct { - k8sh *utils.K8sHelper - manifests installer.CephManifests -} - -// CreateFilesystemOperation Constructor to create FilesystemOperation - client to perform rook file system operations on k8s -func CreateFilesystemOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *FilesystemOperation { - return &FilesystemOperation{k8sh, manifests} -} - -// Create creates a filesystem in Rook -func (f *FilesystemOperation) Create(name, namespace string, activeCount int) error { - logger.Infof("creating the filesystem via CRD") - if err := f.k8sh.ResourceOperation("apply", f.manifests.GetFilesystem(name, activeCount)); err != nil { - return err - } - - logger.Infof("Make sure rook-ceph-mds pod is running") - err := f.k8sh.WaitForLabeledPodsToRun(fmt.Sprintf("rook_file_system=%s", name), namespace) - assert.Nil(f.k8sh.T(), err) - - assert.True(f.k8sh.T(), f.k8sh.CheckPodCountAndState("rook-ceph-mds", namespace, activeCount*2, "Running"), - "Make sure there are four rook-ceph-mds pods present in Running state") - - return nil -} - -// CreateStorageClass creates a storage class for CephFS clients -func (f *FilesystemOperation) CreateStorageClass(fsName, systemNamespace, namespace, storageClassName string) error { - return f.k8sh.ResourceOperation("apply", f.manifests.GetFileStorageClass(fsName, storageClassName)) -} - -// CreateSnapshotClass creates a snapshot class for CephFS clients -func (f *FilesystemOperation) CreateSnapshotClass(snapshotClassName, reclaimPolicy, namespace string) error { - return f.k8sh.ResourceOperation("apply", f.manifests.GetFileStorageSnapshotClass(snapshotClassName, reclaimPolicy)) -} - -// CreatePVCRestore creates a pvc from snapshot -func (f *FilesystemOperation) CreatePVCRestore(namespace, claimName, snapshotName, storageClassName, mode, size string) error { - return f.k8sh.ResourceOperation("apply", installer.GetPVCRestore(claimName, snapshotName, namespace, storageClassName, mode, size)) -} - -// CreatePVCClone creates a pvc from pvc -func (f *FilesystemOperation) CreatePVCClone(namespace, cloneClaimName, parentClaimName, storageClassName, mode, size string) error { - return f.k8sh.ResourceOperation("apply", installer.GetPVCClone(cloneClaimName, parentClaimName, namespace, storageClassName, mode, size)) -} - -// CreateSnapshot creates a snapshot from pvc -func (f *FilesystemOperation) CreateSnapshot(snapshotName, claimName, snapshotClassName, namespace string) error { - return f.k8sh.ResourceOperation("apply", installer.GetSnapshot(snapshotName, claimName, snapshotClassName, namespace)) -} - -// DeleteSnapshot deletes the snapshot -func (f *FilesystemOperation) DeleteSnapshot(snapshotName, claimName, snapshotClassName, namespace string) error { - return f.k8sh.ResourceOperation("delete", installer.GetSnapshot(snapshotName, claimName, snapshotClassName, namespace)) -} - -func (f *FilesystemOperation) DeletePVC(namespace, claimName string) error { - ctx := context.TODO() - logger.Infof("deleting pvc %q from namespace %q", claimName, namespace) - return f.k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, claimName, 
metav1.DeleteOptions{}) -} - -func (f *FilesystemOperation) DeleteStorageClass(storageClassName string) error { - ctx := context.TODO() - logger.Infof("deleting storage class %q", storageClassName) - err := f.k8sh.Clientset.StorageV1().StorageClasses().Delete(ctx, storageClassName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("failed to delete storage class %q. %v", storageClassName, err) - } - - return nil -} - -func (f *FilesystemOperation) CreatePVC(namespace, claimName, storageClassName, mode, size string) error { - return f.k8sh.ResourceOperation("apply", installer.GetPVC(claimName, namespace, storageClassName, mode, size)) -} - -func (f *FilesystemOperation) CreatePod(podName, claimName, namespace, mountPoint string, readOnly bool) error { - return f.k8sh.ResourceOperation("apply", installer.GetPodWithVolume(podName, claimName, namespace, mountPoint, readOnly)) -} - -func (f *FilesystemOperation) DeleteSnapshotClass(snapshotClassName, deletePolicy, namespace string) error { - return f.k8sh.ResourceOperation("delete", f.manifests.GetFileStorageSnapshotClass(snapshotClassName, deletePolicy)) -} - -// ScaleDown scales down the number of active metadata servers of a filesystem in Rook -func (f *FilesystemOperation) ScaleDown(name, namespace string) error { - logger.Infof("scaling down the number of filesystem active metadata servers via CRD") - if err := f.k8sh.ResourceOperation("apply", f.manifests.GetFilesystem(name, 1)); err != nil { - return err - } - - assert.True(f.k8sh.T(), f.k8sh.CheckPodCountAndState("rook-ceph-mds", namespace, 2, "Running"), - "Make sure there are two rook-ceph-mds pods present in Running state") - - return nil -} - -// Delete deletes a filesystem in Rook -func (f *FilesystemOperation) Delete(name, namespace string) error { - ctx := context.TODO() - options := &metav1.DeleteOptions{} - logger.Infof("Deleting filesystem %s in namespace %s", name, namespace) - err := f.k8sh.RookClientset.CephV1().CephFilesystems(namespace).Delete(ctx, name, *options) - if err != nil && !errors.IsNotFound(err) { - return err - } - - crdCheckerFunc := func() error { - _, err := f.k8sh.RookClientset.CephV1().CephFilesystems(namespace).Get(ctx, name, metav1.GetOptions{}) - return err - } - - logger.Infof("Deleted filesystem %s in namespace %s", name, namespace) - return f.k8sh.WaitForCustomResourceDeletion(namespace, name, crdCheckerFunc) -} - -// List lists filesystems in Rook -func (f *FilesystemOperation) List(namespace string) ([]client.CephFilesystem, error) { - context := f.k8sh.MakeContext() - clusterInfo := client.AdminClusterInfo(namespace) - filesystems, err := client.ListFilesystems(context, clusterInfo) - if err != nil { - return nil, fmt.Errorf("failed to list pools: %+v", err) - } - return filesystems, nil -} diff --git a/tests/framework/clients/nfs.go b/tests/framework/clients/nfs.go deleted file mode 100644 index c3b7ab2d8..000000000 --- a/tests/framework/clients/nfs.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
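Editor's note: several of the removed helpers (filesystem.go above, pool.go further down) follow the same teardown pattern: delete the custom resource, then hand a checker closure to WaitForCustomResourceDeletion that re-Gets the CR until the API server returns NotFound. That helper's body is not part of this diff, so the following is only an assumed sketch of its shape, built on k8s.io/apimachinery's errors.IsNotFound:

```go
package main

import (
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// waitForDeletion polls the supplied getter until it returns a NotFound
// error or the retry budget is exhausted. It is a sketch of the pattern the
// removed tests relied on, not the real WaitForCustomResourceDeletion.
func waitForDeletion(get func() error, attempts int, interval time.Duration) error {
	for i := 0; i < attempts; i++ {
		err := get()
		if apierrors.IsNotFound(err) {
			return nil // resource is gone
		}
		if err != nil {
			return fmt.Errorf("unexpected error while waiting for deletion: %w", err)
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("resource still present after %d attempts", attempts)
}

func main() {
	// In the real tests the getter re-Gets the CR, e.g. a CephFilesystem,
	// through the Rook clientset. Here a fake getter stands in for it.
	calls := 0
	fakeGet := func() error {
		calls++
		if calls < 3 {
			return nil // still exists
		}
		return apierrors.NewNotFound(schema.GroupResource{}, "myfs")
	}
	fmt.Println(waitForDeletion(fakeGet, 10, 10*time.Millisecond))
}
```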
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clients - -import ( - "context" - "fmt" - - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NFSOperation is a wrapper for k8s rook file operations -type NFSOperation struct { - k8sh *utils.K8sHelper - manifests installer.CephManifests -} - -// CreateNFSOperation Constructor to create NFSOperation - client to perform ceph nfs operations on k8s -func CreateNFSOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *NFSOperation { - return &NFSOperation{k8sh, manifests} -} - -// Create creates a filesystem in Rook -func (n *NFSOperation) Create(namespace, name, pool string, daemonCount int) error { - - logger.Infof("creating the NFS daemons via CRD") - if err := n.k8sh.ResourceOperation("apply", n.manifests.GetNFS(name, pool, daemonCount)); err != nil { - return err - } - - logger.Infof("Make sure rook-ceph-nfs pod is running") - err := n.k8sh.WaitForLabeledPodsToRun(fmt.Sprintf("ceph_nfs=%s", name), namespace) - assert.Nil(n.k8sh.T(), err) - - assert.True(n.k8sh.T(), n.k8sh.CheckPodCountAndState("rook-ceph-nfs", namespace, daemonCount, "Running"), - "Make sure all nfs daemon pods are in Running state") - - return nil -} - -// Delete deletes a filesystem in Rook -func (n *NFSOperation) Delete(namespace, name string) error { - ctx := context.TODO() - options := &metav1.DeleteOptions{} - logger.Infof("Deleting nfs %s in namespace %s", name, namespace) - err := n.k8sh.RookClientset.CephV1().CephNFSes(namespace).Delete(ctx, name, *options) - if err != nil && !errors.IsNotFound(err) { - return err - } - - logger.Infof("Deleted nfs %s in namespace %s", name, namespace) - return nil -} diff --git a/tests/framework/clients/object.go b/tests/framework/clients/object.go deleted file mode 100644 index 587d43cc4..000000000 --- a/tests/framework/clients/object.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clients - -import ( - "fmt" - - "github.com/coreos/pkg/capnslog" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" -) - -const rgwPort = 80 - -var logger = capnslog.NewPackageLogger("github.com/rook/rook/tests", "clients") - -// ObjectOperation is wrapper for k8s rook object operations -type ObjectOperation struct { - k8sh *utils.K8sHelper - manifests installer.CephManifests -} - -// CreateObjectOperation creates new rook object client -func CreateObjectOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *ObjectOperation { - return &ObjectOperation{k8sh, manifests} -} - -// ObjectCreate Function to create a object store in rook -func (o *ObjectOperation) Create(namespace, storeName string, replicaCount int32, tlsEnable bool) error { - - logger.Info("creating the object store via CRD") - if err := o.k8sh.ResourceOperation("apply", o.manifests.GetObjectStore(storeName, int(replicaCount), rgwPort, tlsEnable)); err != nil { - return err - } - - // Starting an object store takes longer than the average operation, so add more retries - err := o.k8sh.WaitForLabeledPodsToRunWithRetries(fmt.Sprintf("rook_object_store=%s", storeName), namespace, 40) - if err != nil { - return fmt.Errorf("rgw did not start via crd. %+v", err) - } - - // create the external service - return o.k8sh.CreateExternalRGWService(namespace, storeName) -} - -func (o *ObjectOperation) Delete(namespace, storeName string) error { - - logger.Infof("Deleting the object store via CRD") - if err := o.k8sh.DeleteResource("-n", namespace, "CephObjectStore", storeName); err != nil { - return err - } - - if !o.k8sh.WaitUntilPodWithLabelDeleted(fmt.Sprintf("rook_object_store=%s", storeName), namespace) { - return fmt.Errorf("rgw did not stop via crd") - } - return nil -} - -// Need to improve the below function for better error handling -func (o *ObjectOperation) GetEndPointUrl(namespace string, storeName string) (string, error) { - args := []string{"get", "svc", "-n", namespace, "-l", fmt.Sprintf("rgw=%s", storeName), "-o", "jsonpath={.items[*].spec.clusterIP}"} - EndPointUrl, err := o.k8sh.Kubectl(args...) - if err != nil { - return "", fmt.Errorf("Unable to find rgw end point-- %s", err) - } - return fmt.Sprintf("%s:%d", EndPointUrl, rgwPort), nil -} diff --git a/tests/framework/clients/object_user.go b/tests/framework/clients/object_user.go deleted file mode 100644 index 6849624ef..000000000 --- a/tests/framework/clients/object_user.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clients - -import ( - "context" - "fmt" - "strings" - - "github.com/rook/rook/pkg/daemon/ceph/client" - rgw "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ObjectUserOperation is wrapper for k8s rook object user operations -type ObjectUserOperation struct { - k8sh *utils.K8sHelper - manifests installer.CephManifests -} - -// CreateObjectUserOperation creates new rook object user client -func CreateObjectUserOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *ObjectUserOperation { - return &ObjectUserOperation{k8sh, manifests} -} - -// ObjectUserGet Function to get the details of an object user from radosgw -func (o *ObjectUserOperation) GetUser(namespace string, store string, userid string) (*rgw.ObjectUser, error) { - ctx := o.k8sh.MakeContext() - clusterInfo := client.AdminClusterInfo(namespace) - objectStore, err := o.k8sh.RookClientset.CephV1().CephObjectStores(namespace).Get(context.TODO(), store, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to get objectstore info: %+v", err) - } - rgwcontext, err := rgw.NewMultisiteContext(ctx, clusterInfo, objectStore) - if err != nil { - return nil, fmt.Errorf("failed to get RGW context: %+v", err) - } - userinfo, _, err := rgw.GetUser(rgwcontext, userid) - if err != nil { - return nil, fmt.Errorf("failed to get user info: %+v", err) - } - return userinfo, nil -} - -// UserSecretExists Function to check that user secret was created -func (o *ObjectUserOperation) UserSecretExists(namespace string, store string, userid string) bool { - message, err := o.k8sh.GetResource("-n", namespace, "secrets", "-l", "rook_object_store="+store, "-l", "user="+userid) - //GetResource(blah) returns success if blah is or is not found. - //err = success and found_sec not "No resources found." means it was found - //err = success and found_sec contains "No resources found." means it was not found - //err != success is an other error - if err == nil && !strings.Contains(message, "No resources found") { - logger.Infof("Object User Secret Exists") - return true - } - logger.Infof("Unable to find user secret") - return false -} - -// ObjectUserCreate Function to create a object store user in rook -func (o *ObjectUserOperation) Create(namespace string, userid string, displayName string, store string) error { - - logger.Infof("creating the object store user via CRD") - if err := o.k8sh.ResourceOperation("apply", o.manifests.GetObjectStoreUser(userid, displayName, store)); err != nil { - return err - } - return nil -} - -func (o *ObjectUserOperation) Delete(namespace string, userid string) error { - - logger.Infof("Deleting the object store user via CRD") - if err := o.k8sh.DeleteResource("-n", namespace, "CephObjectStoreUser", userid); err != nil { - return err - } - return nil -} diff --git a/tests/framework/clients/pool.go b/tests/framework/clients/pool.go deleted file mode 100644 index 8571a4ad4..000000000 --- a/tests/framework/clients/pool.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clients - -import ( - "context" - "fmt" - "strconv" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PoolOperation is a wrapper for rook pool operations -type PoolOperation struct { - k8sh *utils.K8sHelper - manifests installer.CephManifests -} - -// CreatePoolOperation creates a new pool client -func CreatePoolOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *PoolOperation { - return &PoolOperation{k8sh, manifests} -} - -func (p *PoolOperation) Create(name, namespace string, replicas int) error { - return p.createOrUpdatePool(name, namespace, "apply", replicas) -} - -func (p *PoolOperation) Update(name, namespace string, replicas int) error { - return p.createOrUpdatePool(name, namespace, "apply", replicas) -} - -func (p *PoolOperation) createOrUpdatePool(name, namespace, action string, replicas int) error { - return p.k8sh.ResourceOperation(action, p.manifests.GetBlockPool(name, strconv.Itoa(replicas))) -} - -func (p *PoolOperation) ListCephPools(clusterInfo *client.ClusterInfo) ([]client.CephStoragePoolSummary, error) { - context := p.k8sh.MakeContext() - pools, err := client.ListPoolSummaries(context, clusterInfo) - if err != nil { - return nil, fmt.Errorf("failed to list pools: %+v", err) - } - return pools, nil -} - -func (p *PoolOperation) GetCephPoolDetails(clusterInfo *client.ClusterInfo, name string) (client.CephStoragePoolDetails, error) { - context := p.k8sh.MakeContext() - details, err := client.GetPoolDetails(context, clusterInfo, name) - if err != nil { - return client.CephStoragePoolDetails{}, fmt.Errorf("failed to get pool %s details: %+v", name, err) - } - return details, nil -} - -func (p *PoolOperation) ListPoolCRDs(namespace string) ([]cephv1.CephBlockPool, error) { - ctx := context.TODO() - pools, err := p.k8sh.RookClientset.CephV1().CephBlockPools(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return nil, nil - } - return nil, err - } - - return pools.Items, nil -} - -func (p *PoolOperation) PoolCRDExists(namespace, name string) (bool, error) { - ctx := context.TODO() - _, err := p.k8sh.RookClientset.CephV1().CephBlockPools(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return false, nil - } - return false, err - } - return true, nil -} - -func (p *PoolOperation) CephPoolExists(namespace, name string) (bool, error) { - clusterInfo := client.AdminClusterInfo(namespace) - pools, err := p.ListCephPools(clusterInfo) - if err != nil { - return false, err - } - for _, pool := range pools { - if pool.Name == name { - return true, nil - } - } - return false, nil -} - -// DeletePool deletes a pool after deleting all the block images contained by the pool -func (p *PoolOperation) DeletePool(blockClient *BlockOperation, clusterInfo *client.ClusterInfo, poolName string) error { - ctx := context.TODO() 
- // Delete all the images in a pool - logger.Infof("listing images in pool %q", poolName) - blockImagesList, _ := blockClient.ListImagesInPool(clusterInfo, poolName) - for _, blockImage := range blockImagesList { - logger.Infof("force deleting block image %q in pool %q", blockImage, poolName) - max := 10 - // Wait and retry up to 10 times/seconds to delete RBD images - for i := 0; i < max; i++ { - err := blockClient.DeleteBlockImage(clusterInfo, blockImage) - if err == nil { - break - } - logger.Infof("failed deleting image %q from %q. %v", blockImage, poolName, err) - time.Sleep(2 * time.Second) - if i == max-1 { - return fmt.Errorf("gave up waiting for image %q from %q to be deleted. %v", blockImage, poolName, err) - } - } - } - - logger.Infof("deleting pool CR %q", poolName) - err := p.k8sh.RookClientset.CephV1().CephBlockPools(clusterInfo.Namespace).Delete(ctx, poolName, metav1.DeleteOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return nil - } - return fmt.Errorf("failed to delete pool CR. %v", err) - } - - crdCheckerFunc := func() error { - _, err := p.k8sh.RookClientset.CephV1().CephBlockPools(clusterInfo.Namespace).Get(ctx, poolName, metav1.GetOptions{}) - return err - } - - return p.k8sh.WaitForCustomResourceDeletion(clusterInfo.Namespace, poolName, crdCheckerFunc) -} diff --git a/tests/framework/clients/rbd-mirror.go b/tests/framework/clients/rbd-mirror.go deleted file mode 100644 index 5e9d0bc2e..000000000 --- a/tests/framework/clients/rbd-mirror.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
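Editor's note: the removed DeletePool retries each RBD image delete up to 10 times, two seconds apart, before giving up and surfacing the last error. A minimal standalone sketch of that bounded-retry shape (illustrative only; the real loop calls DeleteBlockImage, which wraps client.DeleteImage):

```go
package main

import (
	"fmt"
	"time"
)

// retry runs op up to attempts times, sleeping interval between failures,
// and returns the last error if it never succeeds. This mirrors the loop
// the removed DeletePool used around each block image deletion.
func retry(attempts int, interval time.Duration, op func() error) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		if lastErr = op(); lastErr == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("gave up after %d attempts: %w", attempts, lastErr)
}

func main() {
	tries := 0
	err := retry(10, 10*time.Millisecond, func() error {
		tries++
		if tries < 3 {
			return fmt.Errorf("image still in use") // stand-in for a transient rbd error
		}
		return nil
	})
	fmt.Println(err, "after", tries, "tries")
}
```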
-*/ - -package clients - -import ( - "context" - - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// RBDMirrorOperation is a wrapper for k8s rook rbd mirror operations -type RBDMirrorOperation struct { - k8sh *utils.K8sHelper - manifests installer.CephManifests -} - -// CreateRBDMirrorOperation Constructor to create RBDMirrorOperation - client to perform ceph rbd mirror operations on k8s -func CreateRBDMirrorOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *RBDMirrorOperation { - return &RBDMirrorOperation{k8sh, manifests} -} - -// Create creates a rbd-mirror in Rook -func (r *RBDMirrorOperation) Create(namespace, name string, daemonCount int) error { - - logger.Infof("creating the RBDMirror daemons via CRD") - if err := r.k8sh.ResourceOperation("apply", r.manifests.GetRBDMirror(name, daemonCount)); err != nil { - return err - } - - logger.Infof("Make sure rook-ceph-rbd-mirror pod is running") - err := r.k8sh.WaitForLabeledPodsToRun("app=rook-ceph-rbd-mirror", namespace) - assert.Nil(r.k8sh.T(), err) - - assert.True(r.k8sh.T(), r.k8sh.CheckPodCountAndState("rook-ceph-rbd-mirror", namespace, daemonCount, "Running"), - "Make sure all rbd-mirror daemon pods are in Running state") - - return nil -} - -// Delete deletes a rbd-mirror in Rook -func (r *RBDMirrorOperation) Delete(namespace, name string) error { - ctx := context.TODO() - options := &metav1.DeleteOptions{} - logger.Infof("Deleting rbd-mirror %s in namespace %s", name, namespace) - err := r.k8sh.RookClientset.CephV1().CephRBDMirrors(namespace).Delete(ctx, name, *options) - if err != nil && !errors.IsNotFound(err) { - return err - } - - logger.Infof("Deleted rbd-mirror %s in namespace %s", name, namespace) - return nil -} diff --git a/tests/framework/clients/read_write.go b/tests/framework/clients/read_write.go deleted file mode 100644 index bf97a1620..000000000 --- a/tests/framework/clients/read_write.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clients - -import ( - "fmt" - - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" -) - -// ReadWriteOperation is a wrapper for k8s rook file operations -type ReadWriteOperation struct { - k8sh *utils.K8sHelper -} - -// CreateReadWriteOperation Constructor to create ReadWriteOperation - client to perform rook file system operations on k8s -func CreateReadWriteOperation(k8sh *utils.K8sHelper) *ReadWriteOperation { - return &ReadWriteOperation{k8sh: k8sh} -} - -// CreateWriteClient Function to create a nfs client in rook -func (f *ReadWriteOperation) CreateWriteClient(volName string) ([]string, error) { - logger.Infof("creating the filesystem via replication controller") - writerSpec := getDeployment(volName) - - if err := f.k8sh.ResourceOperation("apply", writerSpec); err != nil { - return nil, err - } - - assert.True(f.k8sh.T(), f.k8sh.CheckPodCountAndState("read-write-test", "default", 2, "Running"), - "Make sure there are two read-write-test pods present in Running state") - - podList, err := f.k8sh.GetPodNamesForApp("read-write-test", "default") - if err != nil { - return nil, err - } - - return podList, nil -} - -// Delete Function to delete a nfs consuming pod in rook -func (f *ReadWriteOperation) Delete() error { - return f.k8sh.DeleteResource("deployment", "read-write-test") -} - -// Read Function to read from nfs mount point created by rook ,i.e. Read data from a pod that has an nfs export mounted -func (f *ReadWriteOperation) Read(name string) (string, error) { - rd := "/mnt/data" - - args := []string{"exec", name} - - args = append(args, "--", "cat", rd) - - result, err := f.k8sh.Kubectl(args...) - if err != nil { - return "", fmt.Errorf("unable to write data to pod -- : %s", err) - - } - return result, nil -} - -func getDeployment(volName string) string { - return `apiVersion: apps/v1 -kind: Deployment -metadata: - name: read-write-test -spec: - replicas: 2 - selector: - matchLabels: - app: read-write-test - template: - metadata: - labels: - app: read-write-test - spec: - containers: - - image: alpine - command: - - sh - - -c - - 'while true; do hostname > /mnt/data; sleep 3; done' - name: alpine - volumeMounts: - - name: test-vol - mountPath: "/mnt" - volumes: - - name: test-vol - persistentVolumeClaim: - claimName: ` + volName + ` -` -} diff --git a/tests/framework/clients/test_client.go b/tests/framework/clients/test_client.go deleted file mode 100644 index ef26ef022..000000000 --- a/tests/framework/clients/test_client.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clients - -import ( - "fmt" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" -) - -// TestClient is a wrapper for test client, containing interfaces for all rook operations -type TestClient struct { - BlockClient *BlockOperation - FSClient *FilesystemOperation - NFSClient *NFSOperation - ObjectClient *ObjectOperation - ObjectUserClient *ObjectUserOperation - PoolClient *PoolOperation - BucketClient *BucketOperation - UserClient *ClientOperation - RBDMirrorClient *RBDMirrorOperation - k8sh *utils.K8sHelper -} - -// CreateTestClient creates new instance of test client for a platform -func CreateTestClient(k8sHelper *utils.K8sHelper, manifests installer.CephManifests) *TestClient { - return &TestClient{ - CreateBlockOperation(k8sHelper, manifests), - CreateFilesystemOperation(k8sHelper, manifests), - CreateNFSOperation(k8sHelper, manifests), - CreateObjectOperation(k8sHelper, manifests), - CreateObjectUserOperation(k8sHelper, manifests), - CreatePoolOperation(k8sHelper, manifests), - CreateBucketOperation(k8sHelper, manifests), - CreateClientOperation(k8sHelper, manifests), - CreateRBDMirrorOperation(k8sHelper, manifests), - k8sHelper, - } -} - -// Status returns rook status details -func (c TestClient) Status(namespace string) (client.CephStatus, error) { - context := c.k8sh.MakeContext() - clusterInfo := client.AdminClusterInfo(namespace) - status, err := client.Status(context, clusterInfo) - if err != nil { - return client.CephStatus{}, fmt.Errorf("failed to get status: %+v", err) - } - return status, nil -} diff --git a/tests/framework/installer/cassandra_installer.go b/tests/framework/installer/cassandra_installer.go index a18216a2f..4d25de645 100644 --- a/tests/framework/installer/cassandra_installer.go +++ b/tests/framework/installer/cassandra_installer.go @@ -21,8 +21,8 @@ import ( "fmt" "testing" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/tests/framework/utils" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/tests/framework/utils" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) diff --git a/tests/framework/installer/cassandra_manifests.go b/tests/framework/installer/cassandra_manifests.go index 413c0a23d..deb256db7 100644 --- a/tests/framework/installer/cassandra_manifests.go +++ b/tests/framework/installer/cassandra_manifests.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" ) type CassandraManifests struct{} diff --git a/tests/framework/installer/ceph_helm_installer.go b/tests/framework/installer/ceph_helm_installer.go deleted file mode 100644 index a85bd9284..000000000 --- a/tests/framework/installer/ceph_helm_installer.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package installer - -import ( - "context" - "fmt" - "time" - - "github.com/pkg/errors" - "gopkg.in/yaml.v2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - OperatorChartName = "rook-ceph" - CephClusterChartName = "rook-ceph-cluster" -) - -// The Ceph Storage CustomResource and StorageClass names used in testing -const ( - blockPoolName = "ceph-block-test" - blockPoolSCName = "ceph-block-test-sc" - filesystemName = "ceph-filesystem-test" - filesystemSCName = "ceph-filesystem-test-sc" - objectStoreName = "ceph-objectstore-test" - objectStoreSCName = "ceph-bucket-test-sc" -) - -// CreateRookOperatorViaHelm creates rook operator via Helm chart named local/rook present in local repo -func (h *CephInstaller) CreateRookOperatorViaHelm(values map[string]interface{}) error { - // create the operator namespace before the admission controller is created - if err := h.k8shelper.CreateNamespace(h.settings.OperatorNamespace); err != nil { - return errors.Errorf("failed to create namespace %s. %v", h.settings.Namespace, err) - } - if err := h.startAdmissionController(); err != nil { - return errors.Errorf("Failed to start admission controllers: %v", err) - } - if err := h.helmHelper.InstallLocalRookHelmChart(h.settings.OperatorNamespace, OperatorChartName, values); err != nil { - return errors.Errorf("failed to install rook operator via helm, err : %v", err) - } - - return nil -} - -// CreateRookCephClusterViaHelm creates rook cluster via Helm -func (h *CephInstaller) CreateRookCephClusterViaHelm(values map[string]interface{}) error { - var err error - h.settings.DataDirHostPath, err = h.initTestDir(h.settings.Namespace) - if err != nil { - return err - } - - var clusterCRD map[string]interface{} - if err := yaml.Unmarshal([]byte(h.Manifests.GetCephCluster()), &clusterCRD); err != nil { - return err - } - - values["operatorNamespace"] = h.settings.OperatorNamespace - values["configOverride"] = clusterCustomSettings - values["toolbox"] = map[string]interface{}{ - "enabled": true, - "image": "rook/ceph:master", - } - values["cephClusterSpec"] = clusterCRD["spec"] - - if err := h.CreateBlockPoolConfiguration(values, blockPoolName, blockPoolSCName); err != nil { - return err - } - if err := h.CreateFileSystemConfiguration(values, filesystemName, filesystemSCName); err != nil { - return err - } - if err := h.CreateObjectStoreConfiguration(values, objectStoreName, objectStoreSCName); err != nil { - return err - } - - logger.Infof("Creating ceph cluster using Helm with values: %+v", values) - if err := h.helmHelper.InstallLocalRookHelmChart(h.settings.Namespace, CephClusterChartName, values); err != nil { - return err - } - - return nil -} - -// RemoveRookCephClusterHelmDefaultCustomResources tidies up the helm created CRs and Storage Classes, as they interfere with other tests. 
-func (h *CephInstaller) RemoveRookCephClusterHelmDefaultCustomResources() error { - if err := h.k8shelper.Clientset.StorageV1().StorageClasses().Delete(context.TODO(), blockPoolSCName, v1.DeleteOptions{}); err != nil { - return err - } - if err := h.k8shelper.Clientset.StorageV1().StorageClasses().Delete(context.TODO(), filesystemSCName, v1.DeleteOptions{}); err != nil { - return err - } - if err := h.k8shelper.Clientset.StorageV1().StorageClasses().Delete(context.TODO(), objectStoreSCName, v1.DeleteOptions{}); err != nil { - return err - } - if err := h.k8shelper.RookClientset.CephV1().CephBlockPools(h.settings.Namespace).Delete(context.TODO(), blockPoolName, v1.DeleteOptions{}); err != nil { - return err - } - if err := h.k8shelper.RookClientset.CephV1().CephFilesystems(h.settings.Namespace).Delete(context.TODO(), filesystemName, v1.DeleteOptions{}); err != nil { - return err - } - if err := h.k8shelper.RookClientset.CephV1().CephObjectStores(h.settings.Namespace).Delete(context.TODO(), objectStoreName, v1.DeleteOptions{}); err != nil { - return err - } - if !h.k8shelper.WaitUntilPodWithLabelDeleted(fmt.Sprintf("rook_object_store=%s", objectStoreName), h.settings.Namespace) { - return fmt.Errorf("rgw did not stop via crd") - } - return nil -} - -// ConfirmHelmClusterInstalledCorrectly runs some validation to check whether the helm chart installed correctly. -func (h *CephInstaller) ConfirmHelmClusterInstalledCorrectly() error { - storageClassList, err := h.k8shelper.Clientset.StorageV1().StorageClasses().List(context.TODO(), v1.ListOptions{}) - if err != nil { - return err - } - - foundStorageClasses := 0 - for _, storageClass := range storageClassList.Items { - if storageClass.Name == blockPoolSCName { - foundStorageClasses++ - } else if storageClass.Name == filesystemSCName { - foundStorageClasses++ - } else if storageClass.Name == objectStoreSCName { - foundStorageClasses++ - } - } - if foundStorageClasses != 3 { - return fmt.Errorf("did not find the three storage classes which should have been deployed") - } - - // check that ObjectStore is created - logger.Infof("Check that RGW pods are Running") - for i := 0; i < 24 && !h.k8shelper.CheckPodCountAndState("rook-ceph-rgw", h.settings.Namespace, 2, "Running"); i++ { - logger.Infof("(%d) RGW pod check sleeping for 5 seconds ...", i) - time.Sleep(5 * time.Second) - } - if !h.k8shelper.CheckPodCountAndState("rook-ceph-rgw", h.settings.Namespace, 2, "Running") { - return fmt.Errorf("did not find the rados gateway pod, which should have been deployed") - } - return nil -} - -// CreateBlockPoolConfiguration creates a block store configuration -func (h *CephInstaller) CreateBlockPoolConfiguration(values map[string]interface{}, name, scName string) error { - testBlockPoolBytes := []byte(h.Manifests.GetBlockPool("testPool", "1")) - var testBlockPoolCRD map[string]interface{} - if err := yaml.Unmarshal(testBlockPoolBytes, &testBlockPoolCRD); err != nil { - return err - } - - storageClassBytes := []byte(h.Manifests.GetBlockStorageClass(name, scName, "Delete")) - var testBlockSC map[string]interface{} - if err := yaml.Unmarshal(storageClassBytes, &testBlockSC); err != nil { - return err - } - - values["cephBlockPools"] = []map[string]interface{}{ - { - "name": name, - "spec": testBlockPoolCRD["spec"], - "storageClass": map[string]interface{}{ - "enabled": true, - "isDefault": true, - "name": scName, - "parameters": testBlockSC["parameters"], - "reclaimPolicy": "Delete", - "allowVolumeExpansion": true, - }, - }, - } - return nil -} - -// 
CreateFileSystemConfiguration creates a filesystem configuration -func (h *CephInstaller) CreateFileSystemConfiguration(values map[string]interface{}, name, scName string) error { - testFilesystemBytes := []byte(h.Manifests.GetFilesystem("testFilesystem", 1)) - var testFilesystemCRD map[string]interface{} - if err := yaml.Unmarshal(testFilesystemBytes, &testFilesystemCRD); err != nil { - return err - } - - storageClassBytes := []byte(h.Manifests.GetFileStorageClass(name, scName)) - var testFileSystemSC map[string]interface{} - if err := yaml.Unmarshal(storageClassBytes, &testFileSystemSC); err != nil { - return err - } - - values["cephFileSystems"] = []map[string]interface{}{ - { - "name": name, - "spec": testFilesystemCRD["spec"], - "storageClass": map[string]interface{}{ - "enabled": true, - "name": scName, - "parameters": testFileSystemSC["parameters"], - "reclaimPolicy": "Delete", - }, - }, - } - return nil -} - -// CreateObjectStoreConfiguration creates an object store configuration -func (h *CephInstaller) CreateObjectStoreConfiguration(values map[string]interface{}, name, scName string) error { - testObjectStoreBytes := []byte(h.Manifests.GetObjectStore(name, 2, 8080, false)) - var testObjectStoreCRD map[string]interface{} - if err := yaml.Unmarshal(testObjectStoreBytes, &testObjectStoreCRD); err != nil { - return err - } - - storageClassBytes := []byte(h.Manifests.GetBucketStorageClass(name, scName, "Delete", "us-east-1")) - var testObjectStoreSC map[string]interface{} - if err := yaml.Unmarshal(storageClassBytes, &testObjectStoreSC); err != nil { - return err - } - - values["cephObjectStores"] = []map[string]interface{}{ - { - "name": name, - "spec": testObjectStoreCRD["spec"], - "storageClass": map[string]interface{}{ - "enabled": true, - "name": scName, - "parameters": testObjectStoreSC["parameters"], - "reclaimPolicy": "Delete", - }, - }, - } - return nil -} diff --git a/tests/framework/installer/ceph_installer.go b/tests/framework/installer/ceph_installer.go deleted file mode 100644 index 758af83d6..000000000 --- a/tests/framework/installer/ceph_installer.go +++ /dev/null @@ -1,1030 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package installer - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - "math/rand" - "os" - "path" - "testing" - "time" - - "github.com/google/uuid" - "github.com/pkg/errors" - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" -) - -const ( - // test with the latest nautilus build - nautilusTestImage = "quay.io/ceph/ceph:v14" - // nautilusTestImagePartition is the image that contains working ceph-volume code to deploy OSDs on partitions - // currently only used for the upgrade test from 1.5 to 1.6, this cannot be changed to v14 since ceph-volume will fail to deploy OSD on partition on Rook 1.5 - nautilusTestImagePartition = "quay.io/ceph/ceph:v14.2.12" - // test with the latest octopus build - octopusTestImage = "quay.io/ceph/ceph:v15" - // test with the latest pacific build - pacificTestImage = "quay.io/ceph/ceph:v16" - // test with the latest master image - masterTestImage = "ceph/daemon-base:latest-master-devel" - cephOperatorLabel = "app=rook-ceph-operator" - defaultclusterName = "test-cluster" - - clusterCustomSettings = ` -[global] -osd_pool_default_size = 1 -bdev_flock_retry = 20 -` -) - -var ( - NautilusVersion = cephv1.CephVersionSpec{Image: nautilusTestImage} - NautilusPartitionVersion = cephv1.CephVersionSpec{Image: nautilusTestImagePartition} - OctopusVersion = cephv1.CephVersionSpec{Image: octopusTestImage} - PacificVersion = cephv1.CephVersionSpec{Image: pacificTestImage} - MasterVersion = cephv1.CephVersionSpec{Image: masterTestImage, AllowUnsupported: true} -) - -// CephInstaller wraps installing and uninstalling rook on a platform -type CephInstaller struct { - settings *TestCephSettings - Manifests CephManifests - k8shelper *utils.K8sHelper - hostPathToDelete string - helmHelper *utils.HelmHelper - k8sVersion string - changeHostnames bool - T func() *testing.T -} - -// CreateCephOperator creates rook-operator via kubectl -func (h *CephInstaller) CreateCephOperator() (err error) { - // creating rook resources - logger.Info("Creating Rook CRDs") - resources := h.Manifests.GetCRDs(h.k8shelper) - if _, err = h.k8shelper.KubectlWithStdin(resources, createFromStdinArgs...); err != nil { - return err - } - - if h.changeHostnames { - // give nodes a hostname that is different from its k8s node name to confirm that all the daemons will be initialized properly - err = h.k8shelper.ChangeHostnames() - assert.NoError(h.T(), err) - } - - // The operator namespace needs to be created explicitly, while the cluster namespace is created with the common.yaml - if err := h.k8shelper.CreateNamespace(h.settings.OperatorNamespace); err != nil { - return err - } - - // Create the namespace and RBAC before starting the operator - _, err = h.k8shelper.KubectlWithStdin(h.Manifests.GetCommon(), createFromStdinArgs...) - if err != nil { - return errors.Errorf("Failed to create rook-operator pod: %v ", err) - } - - err = h.startAdmissionController() - if err != nil { - return errors.Errorf("Failed to start admission controllers: %v", err) - } - - _, err = h.k8shelper.KubectlWithStdin(h.Manifests.GetOperator(), createFromStdinArgs...) 
- if err != nil { - return errors.Errorf("Failed to create rook-operator pod: %v", err) - } - - logger.Infof("Rook operator started") - return nil -} - -func (h *CephInstaller) startAdmissionController() error { - if !h.k8shelper.VersionAtLeast("v1.16.0") { - logger.Info("skipping the admission controller on K8s version older than v1.16") - return nil - } - if !h.settings.EnableAdmissionController { - logger.Info("skipping admission controller for this test suite") - return nil - } - if utils.IsPlatformOpenShift() { - logger.Info("skipping the admission controller on OpenShift") - return nil - } - - rootPath, err := utils.FindRookRoot() - if err != nil { - return errors.Errorf("failed to find rook root. %v", err) - } - userHome, err := os.UserHomeDir() - if err != nil { - return errors.Errorf("failed to find user home directory. %v", err) - } - scriptPath := path.Join(rootPath, "tests/scripts/deploy_admission_controller_test.sh") - err = h.k8shelper.MakeContext().Executor.ExecuteCommandWithEnv([]string{fmt.Sprintf("NAMESPACE=%s", h.settings.OperatorNamespace), fmt.Sprintf("HOME=%s", userHome)}, "bash", scriptPath) - if err != nil { - return err - } - - return nil -} - -func (h *CephInstaller) WaitForToolbox(namespace string) error { - if err := h.k8shelper.WaitForLabeledPodsToRun("app=rook-ceph-tools", namespace); err != nil { - return errors.Wrap(err, "Rook Toolbox couldn't start") - } - logger.Infof("Rook Toolbox started") - - podNames, err := h.k8shelper.GetPodNamesForApp("rook-ceph-tools", namespace) - assert.NoError(h.T(), err) - for _, podName := range podNames { - // All e2e tests should run ceph commands in the toolbox since we are not inside a container - logger.Infof("found active toolbox pod: %q", podName) - client.RunAllCephCommandsInToolboxPod = podName - return nil - } - - return errors.Errorf("could not find toolbox pod") -} - -// CreateRookToolbox creates rook-ceph-tools via kubectl -func (h *CephInstaller) CreateRookToolbox(manifests CephManifests) (err error) { - logger.Infof("Starting Rook toolbox") - - _, err = h.k8shelper.KubectlWithStdin(manifests.GetToolbox(), createFromStdinArgs...) - if err != nil { - return errors.Wrap(err, "failed to create rook-toolbox pod") - } - - return h.WaitForToolbox(manifests.Settings().Namespace) -} - -// Execute a command in the ceph toolbox -func (h *CephInstaller) Execute(command string, parameters []string, namespace string) (error, string) { - clusterInfo := client.AdminClusterInfo(namespace) - cmd, args := client.FinalizeCephCommandArgs(command, clusterInfo, parameters, h.k8shelper.MakeContext().ConfigDir) - result, err := h.k8shelper.MakeContext().Executor.ExecuteCommandWithOutput(cmd, args...) - if err != nil { - logger.Warningf("Error executing command %q: <%v>", command, err) - return err, result - } - return nil, result -} - -// CreateCephCluster creates rook cluster via kubectl -func (h *CephInstaller) CreateCephCluster() error { - - ctx := context.TODO() - var err error - h.settings.DataDirHostPath, err = h.initTestDir(h.settings.Namespace) - if err != nil { - return errors.Errorf("failed to create test dir. 
%+v", err) - } - logger.Infof("Creating cluster with settings: %+v", h.settings) - - logger.Infof("Creating custom ceph.conf settings") - customSettings := map[string]string{"config": clusterCustomSettings} - customCM := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-config-override", - Namespace: h.settings.Namespace, - }, - Data: customSettings, - } - if _, err := h.k8shelper.Clientset.CoreV1().ConfigMaps(h.settings.Namespace).Create(ctx, customCM, metav1.CreateOptions{}); err != nil { - return errors.Errorf("failed to create custom ceph.conf. %+v", err) - } - - logger.Info("Starting Rook Cluster") - rookCluster := h.Manifests.GetCephCluster() - logger.Info(rookCluster) - maxTry := 10 - for i := 0; i < maxTry; i++ { - _, err := h.k8shelper.KubectlWithStdin(rookCluster, createFromStdinArgs...) - if err == nil { - break - } - if i == maxTry-1 { - return errors.Errorf("failed to create rook cluster. %v", err) - } - logger.Infof("failed to create rook cluster, trying again... %v", err) - time.Sleep(5 * time.Second) - } - - return nil -} - -func (h *CephInstaller) waitForCluster() error { - if err := h.k8shelper.WaitForPodCount("app=rook-ceph-mon", h.settings.Namespace, h.settings.Mons); err != nil { - return err - } - - if err := h.k8shelper.WaitForPodCount("app=rook-ceph-mgr", h.settings.Namespace, 1); err != nil { - return err - } - - if !h.settings.SkipOSDCreation { - if err := h.k8shelper.WaitForPodCount("app=rook-ceph-osd", h.settings.Namespace, 1); err != nil { - return err - } - } - - if h.settings.UseCrashPruner { - if err := h.k8shelper.WaitForCronJob("rook-ceph-crashcollector-pruner", h.settings.Namespace); err != nil { - return err - } - } - - logger.Infof("Rook Cluster started") - if !h.settings.SkipOSDCreation { - return h.k8shelper.WaitForLabeledPodsToRun("app=rook-ceph-osd", h.settings.Namespace) - } - - return nil -} - -// CreateRookExternalCluster creates rook external cluster via kubectl -func (h *CephInstaller) CreateRookExternalCluster(externalManifests CephManifests) error { - var err error - externalSettings := externalManifests.Settings() - externalSettings.DataDirHostPath, err = h.initTestDir(externalSettings.Namespace) - if err != nil { - return errors.Errorf("failed to create test dir. 
%+v", err) - } - - logger.Infof("Creating external cluster %q with core storage namespace %q", externalSettings.Namespace, h.settings.Namespace) - - logger.Infof("Creating external cluster roles") - roles := externalManifests.GetCommonExternal() - if _, err := h.k8shelper.KubectlWithStdin(roles, createFromStdinArgs...); err != nil { - return errors.Wrap(err, "failed to create cluster roles") - } - - // Inject connection information from the first cluster - logger.Info("Injecting cluster connection information") - err = h.injectRookExternalClusterInfo(externalSettings) - if err != nil { - return errors.Wrap(err, "failed to inject cluster information into the external cluster") - } - - // Start the external cluster - logger.Infof("Starting Rook External Cluster with yaml") - rookCluster := externalManifests.GetExternalCephCluster() - if _, err := h.k8shelper.KubectlWithStdin(rookCluster, createFromStdinArgs...); err != nil { - return errors.Wrap(err, "failed to create rook external cluster") - } - - logger.Infof("Running toolbox on external namespace %q", externalSettings.Namespace) - if err := h.CreateRookToolbox(externalManifests); err != nil { - return errors.Wrap(err, "failed to start toolbox on external cluster") - } - - var clusterStatus cephv1.ClusterStatus - for i := 0; i < 8; i++ { - ctx := context.TODO() - clusterResource, err := h.k8shelper.RookClientset.CephV1().CephClusters(externalSettings.Namespace).Get(ctx, externalSettings.ClusterName, metav1.GetOptions{}) - if err != nil { - logger.Warningf("failed to get external cluster CR, retrying. %v", err) - time.Sleep(time.Second * 5) - continue - } - - clusterStatus = clusterResource.Status - clusterPhase := string(clusterResource.Status.Phase) - if clusterPhase != "Connected" { - logger.Warningf("failed to start external cluster, retrying, state: %v", clusterResource.Status) - time.Sleep(time.Second * 5) - } else if clusterPhase == "Connected" { - logger.Info("Rook external cluster connected") - return nil - } - - } - - return errors.Errorf("failed to start external cluster, state: %v", clusterStatus) -} - -// InjectRookExternalClusterInfo inject connection information for an external cluster -func (h *CephInstaller) injectRookExternalClusterInfo(externalSettings *TestCephSettings) error { - ctx := context.TODO() - // get config map - cm, err := h.GetRookExternalClusterMonConfigMap() - if err != nil { - return errors.Errorf("failed to get configmap. %v", err) - } - - // create config map - _, err = h.k8shelper.Clientset.CoreV1().ConfigMaps(externalSettings.Namespace).Create(ctx, cm, metav1.CreateOptions{}) - if err != nil { - return errors.Errorf("failed to create configmap. %v", err) - } - - // get secret - secret, err := h.GetRookExternalClusterMonSecret() - if err != nil { - return errors.Errorf("failed to get secret. %v", err) - } - - // create secret - _, err = h.k8shelper.Clientset.CoreV1().Secrets(externalSettings.Namespace).Create(ctx, secret, metav1.CreateOptions{}) - if err != nil { - return errors.Errorf("failed to create secret. %v", err) - } - - return nil -} - -// GetRookExternalClusterMonConfigMap gets the monitor kubernetes configmap of the external cluster -func (h *CephInstaller) GetRookExternalClusterMonConfigMap() (*v1.ConfigMap, error) { - ctx := context.TODO() - configMapName := "rook-ceph-mon-endpoints" - externalCM, err := h.k8shelper.Clientset.CoreV1().ConfigMaps(h.settings.Namespace).Get(ctx, configMapName, metav1.GetOptions{}) - if err != nil { - return nil, errors.Errorf("failed to get secret. 
%v", err) - } - newCM := &v1.ConfigMap{} - newCM.Name = externalCM.Name - newCM.Data = externalCM.Data - - return newCM, nil -} - -// GetRookExternalClusterMonSecret gets the monitor kubernetes secret of the external cluster -func (h *CephInstaller) GetRookExternalClusterMonSecret() (*v1.Secret, error) { - ctx := context.TODO() - secretName := "rook-ceph-mon" //nolint:gosec // We safely suppress gosec in tests file - - externalSecret, err := h.k8shelper.Clientset.CoreV1().Secrets(h.settings.Namespace).Get(ctx, secretName, metav1.GetOptions{}) - if err != nil { - return nil, errors.Errorf("failed to get secret. %v", err) - } - newSecret := &v1.Secret{} - newSecret.Name = externalSecret.Name - newSecret.Data = externalSecret.Data - - return newSecret, nil -} - -func (h *CephInstaller) initTestDir(namespace string) (string, error) { - val, err := baseTestDir() - if err != nil { - return "", err - } - - h.hostPathToDelete = path.Join(val, "rook-test") - testDir := path.Join(h.hostPathToDelete, namespace) - - // skip the test dir creation if we are not running under "/data" - if val != "/data" { - // Create the test dir on the local host - if err := os.MkdirAll(testDir, 0777); err != nil { - return "", err - } - - var err error - if testDir, err = ioutil.TempDir(testDir, "test-"); err != nil { - return "", err - } - } else { - // Compose a random test directory name without actually creating it since not running on the localhost - r := rand.Int() //nolint:gosec // We safely suppress gosec in tests file - testDir = path.Join(testDir, fmt.Sprintf("test-%d", r)) - } - return testDir, nil -} - -// GetNodeHostnames returns the list of nodes in the k8s cluster -func (h *CephInstaller) GetNodeHostnames() ([]string, error) { - ctx := context.TODO() - nodes, err := h.k8shelper.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, errors.Errorf("failed to get k8s nodes. 
%+v", err) - } - var names []string - for _, node := range nodes.Items { - names = append(names, node.Labels[v1.LabelHostname]) - } - - return names, nil -} - -func (h *CephInstaller) installRookOperator() (bool, error) { - ctx := context.TODO() - var err error - - startDiscovery := false - - h.k8shelper.CreateAnonSystemClusterBinding() - - // Create rook operator - logger.Infof("Starting Rook Operator") - if h.settings.UseHelm { - // enable the discovery daemonset with the helm chart - startDiscovery = true - err := h.CreateRookOperatorViaHelm(map[string]interface{}{ - "enableDiscoveryDaemon": true, - "image": map[string]interface{}{"tag": "master"}, - }) - if err != nil { - return false, errors.Wrap(err, "failed to configure helm") - } - } else { - err := h.CreateCephOperator() - if err != nil { - return false, errors.Wrap(err, "failed to configure ceph operator") - } - } - if !h.k8shelper.IsPodInExpectedState("rook-ceph-operator", h.settings.OperatorNamespace, "Running") { - logger.Error("rook-ceph-operator is not running") - h.k8shelper.GetLogsFromNamespace(h.settings.OperatorNamespace, "test-setup", utils.TestEnvName()) - logger.Error("rook-ceph-operator is not Running, abort!") - return false, err - } - - // disable admission controller test for Kubernetes version older than v1.16.0 - if h.settings.EnableAdmissionController && !utils.IsPlatformOpenShift() && h.k8shelper.VersionAtLeast("v1.16.0") { - if !h.k8shelper.IsPodInExpectedState("rook-ceph-admission-controller", h.settings.OperatorNamespace, "Running") { - assert.Fail(h.T(), "admission controller is not running") - return false, errors.Errorf("admission controller is not running") - } - } - - discovery, err := h.k8shelper.Clientset.AppsV1().DaemonSets(h.settings.OperatorNamespace).Get(ctx, "rook-discover", metav1.GetOptions{}) - if startDiscovery { - assert.NoError(h.T(), err) - assert.NotNil(h.T(), discovery) - } else { - assert.Error(h.T(), err) - assert.True(h.T(), kerrors.IsNotFound(err)) - } - - return true, nil -} - -func (h *CephInstaller) InstallRook() (bool, error) { - if h.settings.RookVersion != VersionMaster { - // make sure we have the images from a previous release locally so the test doesn't hit a timeout - assert.NoError(h.T(), h.k8shelper.GetDockerImage("rook/ceph:"+h.settings.RookVersion)) - } - - assert.NoError(h.T(), h.k8shelper.GetDockerImage(h.settings.CephVersion.Image)) - - k8sversion := h.k8shelper.GetK8sServerVersion() - - logger.Infof("Installing rook on K8s %s", k8sversion) - success, err := h.installRookOperator() - if err != nil { - return false, err - } - if !success { - return false, nil - } - - if h.settings.UseHelm { - err = h.CreateRookCephClusterViaHelm(map[string]interface{}{ - "image": "rook/ceph:master", - }) - if err != nil { - return false, errors.Wrap(err, "failed to install ceph cluster using Helm") - } - } else { - // Create rook cluster - err = h.CreateCephCluster() - if err != nil { - logger.Errorf("Cluster %q install failed. 
%v", h.settings.Namespace, err) - return false, err - } - } - - logger.Info("Waiting for Rook Cluster") - if err := h.waitForCluster(); err != nil { - return false, err - } - - if h.settings.UseHelm { - err := h.WaitForToolbox(h.settings.Namespace) - if err != nil { - return false, err - } - } else { - err = h.CreateRookToolbox(h.Manifests) - if err != nil { - return false, errors.Wrapf(err, "failed to install toolbox in cluster %s", h.settings.Namespace) - } - } - - const loopCount = 20 - for i := 0; i < loopCount; i++ { - _, err = client.Status(h.k8shelper.MakeContext(), client.AdminClusterInfo(h.settings.Namespace)) - if err == nil { - logger.Infof("toolbox ready") - break - } - logger.Infof("toolbox is not ready") - if i == loopCount-1 { - return false, errors.Errorf("toolbox cannot connect to cluster") - } - - time.Sleep(5 * time.Second) - } - - if h.settings.UseHelm { - logger.Infof("Confirming ceph cluster installed correctly") - if err := h.ConfirmHelmClusterInstalledCorrectly(); err != nil { - return false, errors.Wrap(err, "the ceph cluster storage CustomResources did not install correctly") - } - if !h.settings.RetainHelmDefaultStorageCRs { - err = h.RemoveRookCephClusterHelmDefaultCustomResources() - if err != nil { - return false, errors.Wrap(err, "failed to remove the default helm CustomResources") - } - } - } - - logger.Infof("installed rook operator and cluster %s on k8s %s", h.settings.Namespace, h.k8sVersion) - - return true, nil -} - -// UninstallRook uninstalls rook from k8s -func (h *CephInstaller) UninstallRook() { - h.UninstallRookFromMultipleNS(h.Manifests) -} - -// UninstallRookFromMultipleNS uninstalls rook from multiple namespaces in k8s -func (h *CephInstaller) UninstallRookFromMultipleNS(manifests ...CephManifests) { - ctx := context.TODO() - var clusterNamespaces []string - for _, manifest := range manifests { - clusterNamespaces = append(clusterNamespaces, manifest.Settings().Namespace) - } - - // Gather logs after status checks - h.GatherAllRookLogs(h.T().Name(), append([]string{h.settings.OperatorNamespace}, clusterNamespaces...)...) 
- - // If test failed do not teardown and leave the cluster in the state it is - if h.T().Failed() { - logger.Info("one of the tests failed, leaving the cluster in its bad shape for investigation") - return - } - - logger.Infof("Uninstalling Rook") - var err error - skipOperatorCleanup := false - for _, manifest := range manifests { - namespace := manifest.Settings().Namespace - clusterName := manifest.Settings().ClusterName - if manifest.Settings().SkipCleanupPolicy && manifest.Settings().SkipClusterCleanup { - logger.Infof("SKIPPING ALL CLEANUP for namespace %q", namespace) - skipOperatorCleanup = true - continue - } - - testCleanupPolicy := !h.settings.UseHelm && !manifest.Settings().IsExternal && !manifest.Settings().SkipCleanupPolicy - if testCleanupPolicy { - // Add cleanup policy to the core ceph cluster - err = h.addCleanupPolicy(namespace, clusterName) - if err != nil { - assert.NoError(h.T(), err) - // no need to check for cleanup policy later if it already failed - testCleanupPolicy = false - } - - // if the test passed, check that the ceph status is HEALTH_OK before we tear the cluster down - if !h.T().Failed() { - // Only check the Ceph status for the core cluster - // The check won't work for an external cluster since the core cluster is already gone - h.checkCephHealthStatus() - } - } - - // The pool CRs should already be removed by the tests that created them - pools, err := h.k8shelper.RookClientset.CephV1().CephBlockPools(namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(h.T(), err, "failed to retrieve pool CRs") - for _, pool := range pools.Items { - logger.Infof("found pools: %v", pools) - assert.Fail(h.T(), fmt.Sprintf("pool %q still exists", pool.Name)) - // Get the operator log - h.GatherAllRookLogs(h.T().Name()+"poolcheck", h.settings.OperatorNamespace) - } - - if h.settings.UseHelm { - // helm rook-ceph-cluster cleanup - if h.settings.RetainHelmDefaultStorageCRs { - err = h.RemoveRookCephClusterHelmDefaultCustomResources() - if err != nil { - assert.Fail(h.T(), "failed to remove the default helm CustomResources") - } - } - err = h.helmHelper.DeleteLocalRookHelmChart(namespace, CephClusterChartName) - checkError(h.T(), err, fmt.Sprintf("cannot uninstall helm chart %s", CephClusterChartName)) - } else { - err = h.k8shelper.DeleteResourceAndWait(false, "-n", namespace, "cephcluster", clusterName) - checkError(h.T(), err, fmt.Sprintf("cannot remove cluster %s", namespace)) - - clusterDeleteRetries := 0 - crdCheckerFunc := func() error { - _, err := h.k8shelper.RookClientset.CephV1().CephClusters(namespace).Get(ctx, clusterName, metav1.GetOptions{}) - clusterDeleteRetries++ - if clusterDeleteRetries > 10 { - // If the operator really isn't going to remove the finalizer, just force remove it - h.removeClusterFinalizers(namespace, clusterName) - } - - return err - } - err = h.k8shelper.WaitForCustomResourceDeletion(namespace, clusterName, crdCheckerFunc) - checkError(h.T(), err, fmt.Sprintf("failed to wait for cluster crd %s deletion", namespace)) - } - - if testCleanupPolicy { - err = h.waitForCleanupJobs(namespace) - if err != nil { - assert.NoError(h.T(), err) - h.GatherAllRookLogs(h.T().Name()+"cleanup-job", append([]string{h.settings.OperatorNamespace}, clusterNamespaces...)...) 
- } - } - - // helm operator cleanup - if h.settings.UseHelm { - err = h.helmHelper.DeleteLocalRookHelmChart(namespace, OperatorChartName) - checkError(h.T(), err, fmt.Sprintf("cannot uninstall helm chart %s", OperatorChartName)) - - // delete the entire namespace (in non-helm installs it's removed with the common.yaml) - err = h.k8shelper.DeleteResourceAndWait(false, "namespace", namespace) - checkError(h.T(), err, fmt.Sprintf("cannot delete namespace %s", namespace)) - continue - } - - // Skip the remainder of cluster cleanup if desired - if manifest.Settings().SkipClusterCleanup { - logger.Infof("SKIPPING CLUSTER CLEANUP") - skipOperatorCleanup = true - continue - } - - // non-helm cleanup - if manifest.Settings().IsExternal { - logger.Infof("Deleting all the resources in the common external manifest") - _, err = h.k8shelper.KubectlWithStdin(manifest.GetCommonExternal(), deleteFromStdinArgs...) - if err != nil { - logger.Errorf("failed to remove common external resources. %v", err) - } else { - logger.Infof("done deleting all the resources in the common external manifest") - } - } else { - h.k8shelper.PrintResources(namespace, "cephblockpools.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephclients.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephclusters.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephfilesystemmirrors.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephfilesystems.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephnfses.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephobjectrealms.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephobjectstores.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephobjectstoreusers.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephobjectzonegroups.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephobjectzones.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "cephrbdmirrors.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "objectbucketclaims.ceph.rook.io") - h.k8shelper.PrintResources(namespace, "objectbuckets.ceph.rook.io") - h.k8shelper.PrintPodStatus(namespace) - h.k8shelper.PrintPVs(true) - logger.Infof("Deleting all the resources in the common manifest") - _, err = h.k8shelper.KubectlWithStdin(h.Manifests.GetCommon(), deleteFromStdinArgs...) - if err != nil { - logger.Errorf("failed to remove common manifest. %v", err) - } else { - logger.Infof("done deleting all the resources in the common manifest") - } - } - } - - // Skip the remainder of cluster cleanup if desired - if skipOperatorCleanup { - logger.Infof("SKIPPING OPERATOR CLEANUP") - return - } - - if !h.settings.UseHelm { - logger.Infof("Deleting all the resources in the operator manifest") - _, err = h.k8shelper.KubectlWithStdin(h.Manifests.GetOperator(), deleteFromStdinArgs...) - if err != nil { - logger.Errorf("failed to remove operator resources. %v", err) - } else { - logger.Infof("done deleting all the resources in the operator manifest") - } - } - - logger.Info("removing the CRDs") - _, err = h.k8shelper.KubectlWithStdin(h.Manifests.GetCRDs(h.k8shelper), deleteFromStdinArgs...) - if err != nil { - logger.Errorf("failed to remove CRDS. 
%v", err) - } else { - logger.Infof("done deleting all the CRDs") - } - - err = h.k8shelper.DeleteResourceAndWait(false, "namespace", h.settings.OperatorNamespace) - checkError(h.T(), err, fmt.Sprintf("cannot delete operator namespace %s", h.settings.OperatorNamespace)) - - err = h.k8shelper.Clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, "rook-ceph-webhook", metav1.DeleteOptions{}) - checkError(h.T(), err, "failed to delete webhook configuration") - - logger.Infof("done removing the operator from namespace %s", h.settings.OperatorNamespace) - logger.Infof("removing host data dir %s", h.hostPathToDelete) - // removing data dir if exists - if h.hostPathToDelete != "" { - nodes, err := h.GetNodeHostnames() - checkError(h.T(), err, "cannot get node names") - for _, node := range nodes { - err = h.verifyDirCleanup(node, h.hostPathToDelete) - logger.Infof("verified cleanup of %s from node %s", h.hostPathToDelete, node) - assert.NoError(h.T(), err) - } - } - if h.changeHostnames { - // revert the hostname labels for the test - _, err = h.k8shelper.RestoreHostnames() - assert.NoError(h.T(), err) - } - - // wait a bit longer for the system namespace to be cleaned up after their deletion - for i := 0; i < 15; i++ { - _, err := h.k8shelper.Clientset.CoreV1().Namespaces().Get(ctx, h.settings.OperatorNamespace, metav1.GetOptions{}) - if err != nil && kerrors.IsNotFound(err) { - logger.Infof("operator namespace %q removed", h.settings.OperatorNamespace) - break - } - logger.Infof("operator namespace %q still found...", h.settings.OperatorNamespace) - time.Sleep(5 * time.Second) - } -} - -func (h *CephInstaller) removeClusterFinalizers(namespace, clusterName string) { - ctx := context.TODO() - // Get the latest cluster instead of using the same instance in case it has been changed - cluster, err := h.k8shelper.RookClientset.CephV1().CephClusters(namespace).Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("failed to remove finalizer. failed to get cluster. %+v", err) - return - } - objectMeta := &cluster.ObjectMeta - if len(objectMeta.Finalizers) == 0 { - logger.Infof("no finalizers to remove from cluster %s", namespace) - return - } - objectMeta.Finalizers = nil - _, err = h.k8shelper.RookClientset.CephV1().CephClusters(cluster.Namespace).Update(ctx, cluster, metav1.UpdateOptions{}) - if err != nil { - logger.Errorf("failed to remove finalizers from cluster %s. %+v", objectMeta.Name, err) - return - } - logger.Infof("removed finalizers from cluster %s", objectMeta.Name) -} - -func (h *CephInstaller) checkCephHealthStatus() { - ctx := context.TODO() - clusterResource, err := h.k8shelper.RookClientset.CephV1().CephClusters(h.settings.Namespace).Get(ctx, h.settings.ClusterName, metav1.GetOptions{}) - assert.Nil(h.T(), err) - clusterPhase := string(clusterResource.Status.Phase) - if clusterPhase != "Ready" && clusterPhase != "Connected" && clusterPhase != "Progressing" { - assert.Equal(h.T(), "Ready", string(clusterResource.Status.Phase)) - } - - // Depending on the tests, the health may be fluctuating with different components being started or stopped. - // If needed, give it a few seconds to settle and check the status again. 
- logger.Infof("checking ceph cluster health in namespace %q", h.settings.Namespace) - if clusterResource.Status.CephStatus.Health != "HEALTH_OK" { - time.Sleep(10 * time.Second) - clusterResource, err = h.k8shelper.RookClientset.CephV1().CephClusters(h.settings.Namespace).Get(ctx, h.settings.ClusterName, metav1.GetOptions{}) - assert.Nil(h.T(), err) - } - - // The health status is not stable enough for the integration tests to rely on. - // We should enable this check if we can get the ceph status to be stable despite all the changing configurations performed by rook. - //assert.Equal(h.T(), "HEALTH_OK", clusterResource.Status.CephStatus.Health) - assert.NotEqual(h.T(), "", clusterResource.Status.CephStatus.LastChecked) - - // Print the details if the health is not ok - if clusterResource.Status.CephStatus.Health != "HEALTH_OK" { - logger.Errorf("Ceph health status: %s", clusterResource.Status.CephStatus.Health) - for name, message := range clusterResource.Status.CephStatus.Details { - logger.Errorf("Ceph health message: %s. %s: %s", name, message.Severity, message.Message) - } - } -} - -func (h *CephInstaller) verifyDirCleanup(node, dir string) error { - resources := h.GetCleanupVerificationPod(node, dir) - _, err := h.k8shelper.KubectlWithStdin(resources, createFromStdinArgs...) - return err -} - -func (h *CephInstaller) CollectOperatorLog(suiteName, testName string) { - if !h.T().Failed() && TestLogCollectionLevel() != "all" { - return - } - name := fmt.Sprintf("%s_%s", suiteName, testName) - h.k8shelper.CollectPodLogsFromLabel(cephOperatorLabel, h.settings.OperatorNamespace, name, utils.TestEnvName()) -} - -func (h *CephInstaller) GatherAllRookLogs(testName string, namespaces ...string) { - if !h.T().Failed() && TestLogCollectionLevel() != "all" { - return - } - logger.Infof("gathering all logs from the test") - for _, namespace := range namespaces { - h.k8shelper.GetLogsFromNamespace(namespace, testName, utils.TestEnvName()) - h.k8shelper.GetPodDescribeFromNamespace(namespace, testName, utils.TestEnvName()) - h.k8shelper.GetEventsFromNamespace(namespace, testName, utils.TestEnvName()) - } -} - -// NewCephInstaller creates new instance of CephInstaller -func NewCephInstaller(t func() *testing.T, clientset *kubernetes.Clientset, settings *TestCephSettings) *CephInstaller { - - // By default set a cluster name that is different from the namespace so we don't rely on the namespace - // in expected places - if settings.ClusterName == "" { - settings.ClusterName = defaultclusterName - } - - version, err := clientset.ServerVersion() - if err != nil { - logger.Infof("failed to get kubectl server version. 
%+v", err) - } - - k8shelp, err := utils.CreateK8sHelper(t) - if err != nil { - panic("failed to get kubectl client :" + err.Error()) - } - logger.Infof("Rook Version: %s", settings.RookVersion) - logger.Infof("Ceph Version: %s", settings.CephVersion.Image) - h := &CephInstaller{ - settings: settings, - Manifests: NewCephManifests(settings), - k8shelper: k8shelp, - helmHelper: utils.NewHelmHelper(testHelmPath()), - k8sVersion: version.String(), - changeHostnames: k8shelp.VersionAtLeast("v1.18.0"), - T: t, - } - flag.Parse() - return h -} - -// GetCleanupPod gets a cleanup Pod that cleans up the dataDirHostPath -func (h *CephInstaller) GetCleanupPod(node, removalDir string) string { - return `apiVersion: batch/v1 -kind: Job -metadata: - name: rook-cleanup-` + uuid.Must(uuid.NewRandom()).String() + ` -spec: - template: - spec: - restartPolicy: Never - containers: - - name: rook-cleaner - image: rook/ceph:` + VersionMaster + ` - securityContext: - privileged: true - volumeMounts: - - name: cleaner - mountPath: /scrub - command: - - "sh" - - "-c" - - "rm -rf /scrub/*" - nodeSelector: - kubernetes.io/hostname: ` + node + ` - volumes: - - name: cleaner - hostPath: - path: ` + removalDir -} - -// GetCleanupVerificationPod verifies that the dataDirHostPath is empty -func (h *CephInstaller) GetCleanupVerificationPod(node, hostPathDir string) string { - return `apiVersion: batch/v1 -kind: Job -metadata: - name: rook-verify-cleanup-` + uuid.Must(uuid.NewRandom()).String() + ` -spec: - template: - spec: - restartPolicy: Never - containers: - - name: rook-cleaner - image: rook/ceph:` + VersionMaster + ` - securityContext: - privileged: true - volumeMounts: - - name: cleaner - mountPath: /scrub - command: - - "sh" - - "-c" - - | - set -xEeuo pipefail - #Assert dataDirHostPath is empty - if [ "$(ls -A /scrub/)" ]; then - exit 1 - fi - nodeSelector: - kubernetes.io/hostname: ` + node + ` - volumes: - - name: cleaner - hostPath: - path: ` + hostPathDir -} - -func (h *CephInstaller) addCleanupPolicy(namespace, clusterName string) error { - // Retry updating the CR a few times in case of random failure - var returnErr error - for i := 0; i < 3; i++ { - ctx := context.TODO() - cluster, err := h.k8shelper.RookClientset.CephV1().CephClusters(namespace).Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil { - return errors.Errorf("failed to get ceph cluster. %+v", err) - } - cluster.Spec.CleanupPolicy.Confirmation = cephv1.DeleteDataDirOnHostsConfirmation - cluster.Spec.CleanupPolicy.AllowUninstallWithVolumes = true - _, err = h.k8shelper.RookClientset.CephV1().CephClusters(namespace).Update(ctx, cluster, metav1.UpdateOptions{}) - if err != nil { - returnErr = errors.Errorf("failed to add clean up policy to the cluster. %+v", err) - logger.Warningf("could not add cleanup policy, trying again... %v", err) - } else { - logger.Info("successfully added cleanup policy to the ceph cluster and skipping checks for existing volumes") - return nil - } - } - return returnErr -} - -func (h *CephInstaller) waitForCleanupJobs(namespace string) error { - ctx := context.TODO() - allRookCephCleanupJobs := func() (done bool, err error) { - appLabelSelector := fmt.Sprintf("app=%s", cluster.CleanupAppName) - cleanupJobs, err := h.k8shelper.Clientset.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{LabelSelector: appLabelSelector}) - if err != nil { - return false, errors.Errorf("failed to get cleanup jobs. 
%+v", err) - } - // Clean up jobs might take some time to start - if len(cleanupJobs.Items) == 0 { - logger.Infof("no jobs with label selector %q found.", appLabelSelector) - return false, nil - } - for _, job := range cleanupJobs.Items { - logger.Infof("job %q status: %+v", job.Name, job.Status) - if job.Status.Failed > 0 { - return false, errors.Errorf("job %s failed", job.Name) - } - if job.Status.Succeeded == 1 { - l, err := h.k8shelper.Kubectl("-n", namespace, "logs", fmt.Sprintf("job.batch/%s", job.Name)) - if err != nil { - logger.Errorf("cannot get logs for pod %s. %v", job.Name, err) - } - rawData := []byte(l) - logger.Infof("cleanup job %s done. logs: %s", job.Name, string(rawData)) - } - if job.Status.Succeeded == 0 { - return false, nil - } - } - logger.Infof("cleanup job(s) completed") - return true, nil - } - - logger.Info("waiting for job(s) to cleanup the host...") - err := wait.Poll(5*time.Second, 90*time.Second, allRookCephCleanupJobs) - if err != nil { - return errors.Errorf("failed to wait for clean up jobs to complete. %+v", err) - } - - logger.Info("successfully executed all the ceph clean up jobs") - return nil -} diff --git a/tests/framework/installer/ceph_manifests.go b/tests/framework/installer/ceph_manifests.go deleted file mode 100644 index ca913f9b8..000000000 --- a/tests/framework/installer/ceph_manifests.go +++ /dev/null @@ -1,519 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package installer - -import ( - "fmt" - "strconv" - "strings" - - "github.com/rook/rook/tests/framework/utils" -) - -type CephManifests interface { - Settings() *TestCephSettings - GetCRDs(k8shelper *utils.K8sHelper) string - GetOperator() string - GetCommon() string - GetCommonExternal() string - GetCephCluster() string - GetExternalCephCluster() string - GetToolbox() string - GetBlockPool(poolName, replicaSize string) string - GetBlockStorageClass(poolName, storageClassName, reclaimPolicy string) string - GetFileStorageClass(fsName, storageClassName string) string - GetBlockSnapshotClass(snapshotClassName, reclaimPolicy string) string - GetFileStorageSnapshotClass(snapshotClassName, reclaimPolicy string) string - GetFilesystem(name string, activeCount int) string - GetNFS(name, pool string, daemonCount int) string - GetRBDMirror(name string, daemonCount int) string - GetObjectStore(name string, replicaCount, port int, tlsEnable bool) string - GetObjectStoreUser(name, displayName, store string) string - GetBucketStorageClass(storeName, storageClassName, reclaimPolicy, region string) string - GetOBC(obcName, storageClassName, bucketName string, maxObject string, createBucket bool) string - GetClient(name string, caps map[string]string) string -} - -// CephManifestsMaster wraps rook yaml definitions -type CephManifestsMaster struct { - settings *TestCephSettings -} - -// NewCephManifests gets the manifest type depending on the Rook version desired -func NewCephManifests(settings *TestCephSettings) CephManifests { - switch settings.RookVersion { - case VersionMaster: - return &CephManifestsMaster{settings} - case Version1_6: - return &CephManifestsV1_6{settings} - } - panic(fmt.Errorf("unrecognized ceph manifest version: %s", settings.RookVersion)) -} - -func (m *CephManifestsMaster) Settings() *TestCephSettings { - return m.settings -} - -func (m *CephManifestsMaster) GetCRDs(k8shelper *utils.K8sHelper) string { - if k8shelper.VersionAtLeast("v1.16.0") { - return m.settings.readManifest("crds.yaml") - } - return m.settings.readManifest("pre-k8s-1.16/crds.yaml") -} - -func (m *CephManifestsMaster) GetOperator() string { - var manifest string - if utils.IsPlatformOpenShift() { - manifest = m.settings.readManifest("operator-openshift.yaml") - } else { - manifest = m.settings.readManifest("operator.yaml") - } - return m.settings.replaceOperatorSettings(manifest) -} - -func (m *CephManifestsMaster) GetCommonExternal() string { - return m.settings.readManifest("common-external.yaml") -} - -func (m *CephManifestsMaster) GetCommon() string { - return m.settings.readManifest("common.yaml") -} - -func (m *CephManifestsMaster) GetToolbox() string { - if m.settings.DirectMountToolbox { - manifest := strings.ReplaceAll(m.settings.readManifest("direct-mount.yaml"), "name: rook-direct-mount", "name: rook-ceph-tools") - manifest = strings.ReplaceAll(manifest, "name: rook-direct-mount", "name: rook-ceph-tools") - return strings.ReplaceAll(manifest, "app: rook-direct-mount", "app: rook-ceph-tools") - } - return m.settings.readManifest("toolbox.yaml") -} - -//********************************************************************************** -//********************************************************************************** -// After a release, copy the methods below this separator into the versioned file -// such as ceph_manifests_v1.6.go. Methods above this separator do not need to be -// copied since the versioned implementation will load them directly from github. 
-//********************************************************************************** -//********************************************************************************** - -func (m *CephManifestsMaster) GetCephCluster() string { - crushRoot := "# crushRoot not specified; Rook will use `default`" - if m.settings.Mons == 1 { - crushRoot = `crushRoot: "custom-root"` - } - - pruner := "# daysToRetain not specified;" - if m.settings.UseCrashPruner { - pruner = "daysToRetain: 5" - } - - mgrCount := 1 - if m.settings.MultipleMgrs { - mgrCount = 2 - } - if m.settings.UsePVC { - return `apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - # set the name to something different from the namespace - name: ` + m.settings.ClusterName + ` - namespace: ` + m.settings.Namespace + ` -spec: - dataDirHostPath: ` + m.settings.DataDirHostPath + ` - mon: - count: ` + strconv.Itoa(m.settings.Mons) + ` - allowMultiplePerNode: true - volumeClaimTemplate: - spec: - storageClassName: ` + m.settings.StorageClassName + ` - resources: - requests: - storage: 5Gi - cephVersion: - image: ` + m.settings.CephVersion.Image + ` - allowUnsupported: ` + strconv.FormatBool(m.settings.CephVersion.AllowUnsupported) + ` - skipUpgradeChecks: false - continueUpgradeAfterChecksEvenIfNotHealthy: false - mgr: - count: ` + strconv.Itoa(mgrCount) + ` - allowMultiplePerNode: true - dashboard: - enabled: true - network: - hostNetwork: false - crashCollector: - disable: false - ` + pruner + ` - storage: - config: - ` + crushRoot + ` - storageClassDeviceSets: - - name: set1 - count: 1 - portable: false - tuneDeviceClass: true - encrypted: false - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - storageClassName: ` + m.settings.StorageClassName + ` - volumeMode: Block - accessModes: - - ReadWriteOnce - disruptionManagement: - managePodBudgets: true - osdMaintenanceTimeout: 30 - pgHealthCheckTimeout: 0 - manageMachineDisruptionBudgets: false - machineDisruptionBudgetNamespace: openshift-machine-api` - } - - return `apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: ` + m.settings.ClusterName + ` - namespace: ` + m.settings.Namespace + ` -spec: - cephVersion: - image: ` + m.settings.CephVersion.Image + ` - allowUnsupported: ` + strconv.FormatBool(m.settings.CephVersion.AllowUnsupported) + ` - dataDirHostPath: ` + m.settings.DataDirHostPath + ` - network: - hostNetwork: false - crashCollector: - disable: false - ` + pruner + ` - mon: - count: ` + strconv.Itoa(m.settings.Mons) + ` - allowMultiplePerNode: true - dashboard: - enabled: true - skipUpgradeChecks: true - metadataDevice: - storage: - useAllNodes: ` + strconv.FormatBool(!m.settings.SkipOSDCreation) + ` - useAllDevices: ` + strconv.FormatBool(!m.settings.SkipOSDCreation) + ` - deviceFilter: ` + getDeviceFilter() + ` - config: - databaseSizeMB: "1024" - journalSizeMB: "1024" - mgr: - modules: - - name: pg_autoscaler - enabled: true - - name: rook - enabled: true - healthCheck: - daemonHealth: - mon: - interval: 10s - timeout: 15s - osd: - interval: 10s - status: - interval: 5s` -} - -func (m *CephManifestsMaster) GetBlockSnapshotClass(snapshotClassName, reclaimPolicy string) string { - // Create a CSI driver snapshotclass - return ` -apiVersion: snapshot.storage.k8s.io/v1beta1 -kind: VolumeSnapshotClass -metadata: - name: ` + snapshotClassName + ` -driver: ` + m.settings.OperatorNamespace + `.rbd.csi.ceph.com -deletionPolicy: ` + reclaimPolicy + ` -parameters: - clusterID: ` + m.settings.Namespace + ` - 
csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/snapshotter-secret-namespace: ` + m.settings.Namespace + ` -` -} - -func (m *CephManifestsMaster) GetFileStorageSnapshotClass(snapshotClassName, reclaimPolicy string) string { - // Create a CSI driver snapshotclass - return ` -apiVersion: snapshot.storage.k8s.io/v1beta1 -kind: VolumeSnapshotClass -metadata: - name: ` + snapshotClassName + ` -driver: ` + m.settings.OperatorNamespace + `.cephfs.csi.ceph.com -deletionPolicy: ` + reclaimPolicy + ` -parameters: - clusterID: ` + m.settings.Namespace + ` - csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/snapshotter-secret-namespace: ` + m.settings.Namespace + ` -` -} - -func (m *CephManifestsMaster) GetBlockPool(poolName string, replicaSize string) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: ` + poolName + ` - namespace: ` + m.settings.Namespace + ` -spec: - replicated: - size: ` + replicaSize + ` - targetSizeRatio: .5 - requireSafeReplicaSize: false - compressionMode: aggressive - mirroring: - enabled: true - mode: image - quotas: - maxSize: 10Gi - maxObjects: 1000000 - statusCheck: - mirror: - disabled: false - interval: 10s` -} - -func (m *CephManifestsMaster) GetBlockStorageClass(poolName, storageClassName, reclaimPolicy string) string { - // Create a CSI driver storage class - if m.settings.UseCSI { - return ` -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ` + storageClassName + ` -provisioner: ` + m.settings.OperatorNamespace + `.rbd.csi.ceph.com -reclaimPolicy: ` + reclaimPolicy + ` -parameters: - pool: ` + poolName + ` - clusterID: ` + m.settings.Namespace + ` - csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: ` + m.settings.Namespace + ` - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: ` + m.settings.Namespace + ` - imageFeatures: layering - csi.storage.k8s.io/fstype: ext4 -` - } - // Create a FLEX driver storage class - return `apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ` + storageClassName + ` -provisioner: ceph.rook.io/block -allowVolumeExpansion: true -reclaimPolicy: ` + reclaimPolicy + ` -parameters: - blockPool: ` + poolName + ` - clusterNamespace: ` + m.settings.Namespace -} - -func (m *CephManifestsMaster) GetFileStorageClass(fsName, storageClassName string) string { - // Create a CSI driver storage class - csiCephFSNodeSecret := "rook-csi-cephfs-node" //nolint:gosec // We safely suppress gosec in tests file - csiCephFSProvisionerSecret := "rook-csi-cephfs-provisioner" //nolint:gosec // We safely suppress gosec in tests file - return ` -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ` + storageClassName + ` -provisioner: ` + m.settings.OperatorNamespace + `.cephfs.csi.ceph.com -parameters: - clusterID: ` + m.settings.Namespace + ` - fsName: ` + fsName + ` - pool: ` + fsName + `-data0 - csi.storage.k8s.io/provisioner-secret-name: ` + csiCephFSProvisionerSecret + ` - csi.storage.k8s.io/provisioner-secret-namespace: ` + m.settings.Namespace + ` - csi.storage.k8s.io/node-stage-secret-name: ` + csiCephFSNodeSecret + ` - csi.storage.k8s.io/node-stage-secret-namespace: ` + m.settings.Namespace + ` -` -} - -// GetFilesystem returns the manifest to create a Rook filesystem resource with the given config. 
-func (m *CephManifestsMaster) GetFilesystem(name string, activeCount int) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - metadataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - dataPools: - - replicated: - size: 1 - requireSafeReplicaSize: false - compressionMode: none - metadataServer: - activeCount: ` + strconv.Itoa(activeCount) + ` - activeStandby: true` -} - -// GetFilesystem returns the manifest to create a Rook Ceph NFS resource with the given config. -func (m *CephManifestsMaster) GetNFS(name, pool string, count int) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephNFS -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - rados: - pool: ` + pool + ` - namespace: nfs-ns - server: - active: ` + strconv.Itoa(count) -} - -func (m *CephManifestsMaster) GetObjectStore(name string, replicaCount, port int, tlsEnable bool) string { - if tlsEnable { - return `apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - metadataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - compressionMode: passive - dataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - gateway: - type: s3 - securePort: ` + strconv.Itoa(port) + ` - instances: ` + strconv.Itoa(replicaCount) + ` - sslCertificateRef: ` + name + ` - healthCheck: - bucket: - disabled: false - interval: 10s -` - } - return `apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - metadataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - compressionMode: passive - dataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - gateway: - port: ` + strconv.Itoa(port) + ` - instances: ` + strconv.Itoa(replicaCount) + ` - healthCheck: - bucket: - disabled: false - interval: 5s -` -} - -func (m *CephManifestsMaster) GetObjectStoreUser(name string, displayName string, store string) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephObjectStoreUser -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - displayName: ` + displayName + ` - store: ` + store -} - -//GetBucketStorageClass returns the manifest to create object bucket -func (m *CephManifestsMaster) GetBucketStorageClass(storeName, storageClassName, reclaimPolicy, region string) string { - return `apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ` + storageClassName + ` -provisioner: ` + m.settings.Namespace + `.ceph.rook.io/bucket -reclaimPolicy: ` + reclaimPolicy + ` -parameters: - objectStoreName: ` + storeName + ` - objectStoreNamespace: ` + m.settings.Namespace + ` - region: ` + region -} - -//GetOBC returns the manifest to create object bucket claim -func (m *CephManifestsMaster) GetOBC(claimName string, storageClassName string, objectBucketName string, maxObject string, varBucketName bool) string { - bucketParameter := "generateBucketName" - if varBucketName { - bucketParameter = "bucketName" - } - return `apiVersion: objectbucket.io/v1alpha1 -kind: ObjectBucketClaim -metadata: - name: ` + claimName + ` -spec: - ` + bucketParameter + `: ` + objectBucketName + ` - storageClassName: ` + storageClassName + ` - additionalConfig: - maxObjects: "` + maxObject + `"` -} - -func (m *CephManifestsMaster) GetClient(claimName string, caps map[string]string) string { - clientCaps := []string{} - 
for name, cap := range caps { - str := name + ": " + cap - clientCaps = append(clientCaps, str) - } - return `apiVersion: ceph.rook.io/v1 -kind: CephClient -metadata: - name: ` + claimName + ` - namespace: ` + m.settings.Namespace + ` -spec: - caps: - ` + strings.Join(clientCaps, "\n ") -} - -func (m *CephManifestsMaster) GetExternalCephCluster() string { - return `apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: ` + m.settings.ClusterName + ` - namespace: ` + m.settings.Namespace + ` -spec: - external: - enable: true - dataDirHostPath: ` + m.settings.DataDirHostPath + ` - healthCheck: - daemonHealth: - status: - interval: 5s` -} - -// GetRBDMirror returns the manifest to create a Rook Ceph RBD Mirror resource with the given config. -func (m *CephManifestsMaster) GetRBDMirror(name string, count int) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephRBDMirror -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - count: ` + strconv.Itoa(count) -} diff --git a/tests/framework/installer/ceph_manifests_v1.6.go b/tests/framework/installer/ceph_manifests_v1.6.go deleted file mode 100644 index 0bb4da38f..000000000 --- a/tests/framework/installer/ceph_manifests_v1.6.go +++ /dev/null @@ -1,445 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package installer - -import ( - "strconv" - "strings" - - "github.com/rook/rook/tests/framework/utils" -) - -const ( - // The version from which the upgrade test will start - Version1_6 = "v1.6.7" -) - -// CephManifestsV1_6 wraps rook yaml definitions -type CephManifestsV1_6 struct { - settings *TestCephSettings -} - -func (m *CephManifestsV1_6) Settings() *TestCephSettings { - return m.settings -} - -func (m *CephManifestsV1_6) GetCRDs(k8shelper *utils.K8sHelper) string { - if k8shelper.VersionAtLeast("v1.16.0") { - return m.settings.readManifestFromGithub("crds.yaml") - } - return m.settings.readManifestFromGithub("pre-k8s-1.16/crds.yaml") -} - -// GetRookOperator returns rook Operator manifest -func (m *CephManifestsV1_6) GetOperator() string { - var manifest string - if utils.IsPlatformOpenShift() { - manifest = m.settings.readManifestFromGithub("operator-openshift.yaml") - } else { - manifest = m.settings.readManifestFromGithub("operator.yaml") - } - return m.settings.replaceOperatorSettings(manifest) -} - -// GetCommon returns rook-cluster manifest -func (m *CephManifestsV1_6) GetCommon() string { - return m.settings.readManifestFromGithub("common.yaml") -} - -// GetRookToolBox returns rook-toolbox manifest -func (m *CephManifestsV1_6) GetToolbox() string { - if m.settings.DirectMountToolbox { - manifest := strings.ReplaceAll(m.settings.readManifestFromGithub("direct-mount.yaml"), "name: rook-direct-mount", "name: rook-ceph-tools") - return strings.ReplaceAll(manifest, "app: rook-direct-mount", "app: rook-ceph-tools") - } - return m.settings.readManifestFromGithub("toolbox.yaml") -} - -func (m *CephManifestsV1_6) GetCommonExternal() string { - return m.settings.readManifestFromGithub("common-external.yaml") -} - -//********************************************************************************** -//********************************************************************************** -// After a release, replace the methods below this separator from the -// ceph_manifests.go. Methods above this separator do not need to be -// copied since they will load them directly from github. 
-//********************************************************************************** -//********************************************************************************** - -// GetRookCluster returns rook-cluster manifest -func (m *CephManifestsV1_6) GetCephCluster() string { - crushRoot := "# crushRoot not specified; Rook will use `default`" - if m.settings.Mons == 1 { - crushRoot = `crushRoot: "custom-root"` - } - - if m.settings.UsePVC { - return `apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - # set the name to something different from the namespace - name: ` + m.settings.ClusterName + ` - namespace: ` + m.settings.Namespace + ` -spec: - dataDirHostPath: ` + m.settings.DataDirHostPath + ` - mon: - count: ` + strconv.Itoa(m.settings.Mons) + ` - allowMultiplePerNode: true - volumeClaimTemplate: - spec: - storageClassName: ` + m.settings.StorageClassName + ` - resources: - requests: - storage: 5Gi - cephVersion: - image: ` + m.settings.CephVersion.Image + ` - allowUnsupported: ` + strconv.FormatBool(m.settings.CephVersion.AllowUnsupported) + ` - skipUpgradeChecks: false - continueUpgradeAfterChecksEvenIfNotHealthy: false - dashboard: - enabled: true - network: - hostNetwork: false - crashCollector: - disable: false - storage: - config: - ` + crushRoot + ` - storageClassDeviceSets: - - name: set1 - count: 1 - portable: false - tuneDeviceClass: true - encrypted: false - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - storageClassName: ` + m.settings.StorageClassName + ` - volumeMode: Block - accessModes: - - ReadWriteOnce - disruptionManagement: - managePodBudgets: false - osdMaintenanceTimeout: 30 - pgHealthCheckTimeout: 0 - manageMachineDisruptionBudgets: false - machineDisruptionBudgetNamespace: openshift-machine-api` - } - - return `apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: ` + m.settings.ClusterName + ` - namespace: ` + m.settings.Namespace + ` -spec: - cephVersion: - image: ` + m.settings.CephVersion.Image + ` - allowUnsupported: ` + strconv.FormatBool(m.settings.CephVersion.AllowUnsupported) + ` - dataDirHostPath: ` + m.settings.DataDirHostPath + ` - network: - hostNetwork: false - mon: - count: ` + strconv.Itoa(m.settings.Mons) + ` - allowMultiplePerNode: true - dashboard: - enabled: true - skipUpgradeChecks: true - metadataDevice: - storage: - useAllNodes: ` + strconv.FormatBool(!m.settings.SkipOSDCreation) + ` - useAllDevices: ` + strconv.FormatBool(!m.settings.SkipOSDCreation) + ` - deviceFilter: '' - config: - databaseSizeMB: "1024" - journalSizeMB: "1024" - mgr: - modules: - - name: pg_autoscaler - enabled: true - - name: rook - enabled: true - healthCheck: - daemonHealth: - mon: - interval: 10s - timeout: 15s - osd: - interval: 10s - status: - interval: 5s` -} - -func (m *CephManifestsV1_6) GetBlockSnapshotClass(snapshotClassName, reclaimPolicy string) string { - // Create a CSI driver snapshotclass - return ` -apiVersion: snapshot.storage.k8s.io/v1beta1 -kind: VolumeSnapshotClass -metadata: - name: ` + snapshotClassName + ` -driver: ` + m.settings.OperatorNamespace + `.rbd.csi.ceph.com -deletionPolicy: ` + reclaimPolicy + ` -parameters: - clusterID: ` + m.settings.Namespace + ` - csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/snapshotter-secret-namespace: ` + m.settings.Namespace + ` -` -} - -func (m *CephManifestsV1_6) GetFileStorageSnapshotClass(snapshotClassName, reclaimPolicy string) string { - // Create a CSI driver snapshotclass - return ` 
-apiVersion: snapshot.storage.k8s.io/v1beta1 -kind: VolumeSnapshotClass -metadata: - name: ` + snapshotClassName + ` -driver: ` + m.settings.OperatorNamespace + `.cephfs.csi.ceph.com -deletionPolicy: ` + reclaimPolicy + ` -parameters: - clusterID: ` + m.settings.Namespace + ` - csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/snapshotter-secret-namespace: ` + m.settings.Namespace + ` -` -} - -func (m *CephManifestsV1_6) GetBlockPool(poolName, replicaSize string) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: ` + poolName + ` - namespace: ` + m.settings.Namespace + ` -spec: - replicated: - size: ` + replicaSize + ` - targetSizeRatio: .5 - requireSafeReplicaSize: false - compressionMode: aggressive - mirroring: - enabled: true - mode: image - quotas: - maxBytes: 10737418240 - maxObjects: 1000000 - statusCheck: - mirror: - disabled: false - interval: 10s` -} - -func (m *CephManifestsV1_6) GetBlockStorageClass(poolName, storageClassName, reclaimPolicy string) string { - // Create a CSI driver storage class - if m.settings.UseCSI { - return ` -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ` + storageClassName + ` -provisioner: ` + m.settings.OperatorNamespace + `.rbd.csi.ceph.com -reclaimPolicy: ` + reclaimPolicy + ` -parameters: - pool: ` + poolName + ` - clusterID: ` + m.settings.Namespace + ` - csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: ` + m.settings.Namespace + ` - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: ` + m.settings.Namespace + ` - imageFeatures: layering - csi.storage.k8s.io/fstype: ext4 -` - } - // Create a FLEX driver storage class - return `apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ` + storageClassName + ` -provisioner: ceph.rook.io/block -allowVolumeExpansion: true -reclaimPolicy: ` + reclaimPolicy + ` -parameters: - blockPool: ` + poolName + ` - clusterNamespace: ` + m.settings.Namespace -} - -func (m *CephManifestsV1_6) GetFileStorageClass(fsName, storageClassName string) string { - // Create a CSI driver storage class - csiCephFSNodeSecret := "rook-csi-cephfs-node" //nolint:gosec // We safely suppress gosec in tests file - csiCephFSProvisionerSecret := "rook-csi-cephfs-provisioner" //nolint:gosec // We safely suppress gosec in tests file - return ` -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ` + storageClassName + ` -provisioner: ` + m.settings.OperatorNamespace + `.cephfs.csi.ceph.com -parameters: - clusterID: ` + m.settings.Namespace + ` - fsName: ` + fsName + ` - pool: ` + fsName + `-data0 - csi.storage.k8s.io/provisioner-secret-name: ` + csiCephFSProvisionerSecret + ` - csi.storage.k8s.io/provisioner-secret-namespace: ` + m.settings.Namespace + ` - csi.storage.k8s.io/node-stage-secret-name: ` + csiCephFSNodeSecret + ` - csi.storage.k8s.io/node-stage-secret-namespace: ` + m.settings.Namespace + ` -` -} - -// GetFilesystem returns the manifest to create a Rook filesystem resource with the given config. 
-func (m *CephManifestsV1_6) GetFilesystem(name string, activeCount int) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - metadataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - dataPools: - - replicated: - size: 1 - requireSafeReplicaSize: false - compressionMode: none - metadataServer: - activeCount: ` + strconv.Itoa(activeCount) + ` - activeStandby: true` -} - -// GetFilesystem returns the manifest to create a Rook Ceph NFS resource with the given config. -func (m *CephManifestsV1_6) GetNFS(name, pool string, count int) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephNFS -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - rados: - pool: ` + pool + ` - namespace: nfs-ns - server: - active: ` + strconv.Itoa(count) -} - -func (m *CephManifestsV1_6) GetObjectStore(name string, replicaCount, port int, tlsEnable bool) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - metadataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - compressionMode: passive - dataPool: - replicated: - size: 1 - requireSafeReplicaSize: false - gateway: - sslCertificateRef: - port: ` + strconv.Itoa(port) + ` - instances: ` + strconv.Itoa(replicaCount) + ` - healthCheck: - bucket: - disabled: false - interval: 10s -` -} - -func (m *CephManifestsV1_6) GetObjectStoreUser(name string, displayName string, store string) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephObjectStoreUser -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - displayName: ` + displayName + ` - store: ` + store -} - -//GetBucketStorageClass returns the manifest to create object bucket -func (m *CephManifestsV1_6) GetBucketStorageClass(storeName, storageClassName, reclaimPolicy, region string) string { - return `apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ` + storageClassName + ` -provisioner: ` + m.settings.Namespace + `.ceph.rook.io/bucket -reclaimPolicy: ` + reclaimPolicy + ` -parameters: - objectStoreName: ` + storeName + ` - objectStoreNamespace: ` + m.settings.Namespace + ` - region: ` + region -} - -//GetOBC returns the manifest to create object bucket claim -func (m *CephManifestsV1_6) GetOBC(claimName, storageClassName, objectBucketName, maxObject string, varBucketName bool) string { - bucketParameter := "generateBucketName" - if varBucketName { - bucketParameter = "bucketName" - } - return `apiVersion: objectbucket.io/v1alpha1 -kind: ObjectBucketClaim -metadata: - name: ` + claimName + ` -spec: - ` + bucketParameter + `: ` + objectBucketName + ` - storageClassName: ` + storageClassName + ` - additionalConfig: - maxObjects: "` + maxObject + `"` -} - -func (m *CephManifestsV1_6) GetClient(claimName string, caps map[string]string) string { - clientCaps := []string{} - for name, cap := range caps { - str := name + ": " + cap - clientCaps = append(clientCaps, str) - } - return `apiVersion: ceph.rook.io/v1 -kind: CephClient -metadata: - name: ` + claimName + ` - namespace: ` + m.settings.Namespace + ` -spec: - caps: - ` + strings.Join(clientCaps, "\n ") -} - -func (m *CephManifestsV1_6) GetExternalCephCluster() string { - return `apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: ` + m.settings.Namespace + ` - namespace: ` + m.settings.Namespace + ` -spec: - external: - enable: true - 
dataDirHostPath: ` + m.settings.DataDirHostPath + `` -} - -// GetRBDMirror returns the manifest to create a Rook Ceph RBD Mirror resource with the given config. -func (m *CephManifestsV1_6) GetRBDMirror(name string, count int) string { - return `apiVersion: ceph.rook.io/v1 -kind: CephRBDMirror -metadata: - name: ` + name + ` - namespace: ` + m.settings.Namespace + ` -spec: - count: ` + strconv.Itoa(count) -} diff --git a/tests/framework/installer/ceph_settings.go b/tests/framework/installer/ceph_settings.go deleted file mode 100644 index bb11e2dcd..000000000 --- a/tests/framework/installer/ceph_settings.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package installer - -import ( - "fmt" - "os" - "strings" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" -) - -// TestCephSettings struct for handling panic and test suite tear down -type TestCephSettings struct { - DataDirHostPath string - ClusterName string - Namespace string - OperatorNamespace string - StorageClassName string - UseHelm bool - RetainHelmDefaultStorageCRs bool - UsePVC bool - Mons int - UseCrashPruner bool - MultipleMgrs bool - SkipOSDCreation bool - UseCSI bool - EnableDiscovery bool - EnableAdmissionController bool - IsExternal bool - SkipClusterCleanup bool - SkipCleanupPolicy bool - DirectMountToolbox bool - EnableVolumeReplication bool - RookVersion string - CephVersion cephv1.CephVersionSpec -} - -func (s *TestCephSettings) ApplyEnvVars() { - // skip the cleanup by default - s.SkipClusterCleanup = true - if os.Getenv("SKIP_TEST_CLEANUP") == "false" { - s.SkipClusterCleanup = false - } - s.SkipCleanupPolicy = true - if os.Getenv("SKIP_CLEANUP_POLICY") == "false" { - s.SkipCleanupPolicy = false - } -} - -func (s *TestCephSettings) readManifest(filename string) string { - manifest := readManifest("ceph", filename) - return replaceNamespaces(manifest, manifest, s.OperatorNamespace, s.Namespace) -} - -func (s *TestCephSettings) readManifestFromGithub(filename string) string { - return s.readManifestFromGithubWithClusterNamespace(filename, s.Namespace) -} - -func (s *TestCephSettings) readManifestFromGithubWithClusterNamespace(filename, clusterNamespace string) string { - manifest := readManifestFromGithub(s.RookVersion, "ceph", filename) - return replaceNamespaces(filename, manifest, s.OperatorNamespace, clusterNamespace) -} - -func (s *TestCephSettings) replaceOperatorSettings(manifest string) string { - manifest = strings.ReplaceAll(manifest, `# CSI_LOG_LEVEL: "0"`, `CSI_LOG_LEVEL: "5"`) - manifest = strings.ReplaceAll(manifest, `ROOK_ENABLE_DISCOVERY_DAEMON: "false"`, fmt.Sprintf(`ROOK_ENABLE_DISCOVERY_DAEMON: "%t"`, s.EnableDiscovery)) - manifest = strings.ReplaceAll(manifest, `ROOK_ENABLE_FLEX_DRIVER: "false"`, fmt.Sprintf(`ROOK_ENABLE_FLEX_DRIVER: "%t"`, !s.UseCSI)) - manifest = strings.ReplaceAll(manifest, `ROOK_CSI_ENABLE_CEPHFS: "true"`, fmt.Sprintf(`ROOK_CSI_ENABLE_CEPHFS: "%t"`, s.UseCSI)) - manifest = 
strings.ReplaceAll(manifest, `ROOK_CSI_ENABLE_RBD: "true"`, fmt.Sprintf(`ROOK_CSI_ENABLE_RBD: "%t"`, s.UseCSI)) - manifest = strings.ReplaceAll(manifest, `CSI_ENABLE_VOLUME_REPLICATION: "false"`, fmt.Sprintf(`CSI_ENABLE_VOLUME_REPLICATION: "%t"`, s.EnableVolumeReplication)) - return manifest -} - -func replaceNamespaces(name, manifest, operatorNamespace, clusterNamespace string) string { - // RBAC and related namespaces - manifest = strings.ReplaceAll(manifest, "rook-ceph # namespace:operator", operatorNamespace) - manifest = strings.ReplaceAll(manifest, "rook-ceph # namespace:cluster", clusterNamespace) - manifest = strings.ReplaceAll(manifest, "rook-ceph-external # namespace:cluster", clusterNamespace) - // Double space only needed for v1.5 upgrade test - manifest = strings.ReplaceAll(manifest, "rook-ceph # namespace:operator", operatorNamespace) - - // SCC namespaces for operator and Ceph daemons - manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-ceph-system # serviceaccount:namespace:operator", operatorNamespace+":rook-ceph-system") - manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-ceph-mgr # serviceaccount:namespace:cluster", clusterNamespace+":rook-ceph-mgr") - manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-ceph-osd # serviceaccount:namespace:cluster", clusterNamespace+":rook-ceph-osd") - - // SCC namespaces for CSI driver - manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-rbd-plugin-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-rbd-plugin-sa") - manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-rbd-provisioner-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-rbd-provisioner-sa") - manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-cephfs-plugin-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-cephfs-plugin-sa") - manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-cephfs-provisioner-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-cephfs-provisioner-sa") - - // CSI Drivers - manifest = strings.ReplaceAll(manifest, "rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator", operatorNamespace+".cephfs.csi.ceph.com") - manifest = strings.ReplaceAll(manifest, "rook-ceph.rbd.csi.ceph.com # driver:namespace:operator", operatorNamespace+".rbd.csi.ceph.com") - - // Bucket storage class - manifest = strings.ReplaceAll(manifest, "rook-ceph.ceph.rook.io/bucket # driver:namespace:cluster", clusterNamespace+".ceph.rook.io/bucket") - if strings.Contains(manifest, "namespace:operator") || strings.Contains(manifest, "namespace:cluster") || strings.Contains(manifest, "driver:namespace:") || strings.Contains(manifest, "serviceaccount:namespace:") { - logger.Infof("BAD MANIFEST:\n%s", manifest) - panic(fmt.Sprintf("manifest %s still contains a namespace identifier", name)) - } - return manifest -} diff --git a/tests/framework/installer/device.go b/tests/framework/installer/device.go index 6292743ee..c81e57714 100644 --- a/tests/framework/installer/device.go +++ b/tests/framework/installer/device.go @@ -19,8 +19,8 @@ package installer import ( "strings" - "github.com/rook/rook/pkg/util/exec" - "github.com/rook/rook/pkg/util/sys" + "github.com/rook/cassandra/pkg/util/exec" + "github.com/rook/cassandra/pkg/util/sys" ) // IsAdditionalDeviceAvailableOnCluster checks whether a given device is available to become an OSD diff --git a/tests/framework/installer/environment.go b/tests/framework/installer/environment.go index b41b96c2c..98983274c 100644 --- 
a/tests/framework/installer/environment.go +++ b/tests/framework/installer/environment.go @@ -20,11 +20,6 @@ import ( "os" ) -// testHelmPath gets the helm path -func testHelmPath() string { - return getEnvVarWithDefault("TEST_HELM_PATH", "/tmp/rook-tests-scripts-helm/helm") -} - // TestLogCollectionLevel gets whether to collect all logs func TestLogCollectionLevel() string { return getEnvVarWithDefault("TEST_LOG_COLLECTION_LEVEL", "") @@ -49,31 +44,11 @@ func UsePVC() bool { return StorageClassName() != "" } -// baseTestDir gets the base test directory -func baseTestDir() (string, error) { - // If the base test directory is actively set to WORKING_DIR (as in CI), - // we use the current working directory. - val := getEnvVarWithDefault("TEST_BASE_DIR", "/data") - if val == "WORKING_DIR" { - var err error - val, err = os.Getwd() - if err != nil { - return "", err - } - } - return val, nil -} - // TestScratchDevice get the scratch device to be used for OSD func TestScratchDevice() string { return getEnvVarWithDefault("TEST_SCRATCH_DEVICE", "/dev/nvme0n1") } -// getDeviceFilter get the device name used for OSD -func getDeviceFilter() string { - return getEnvVarWithDefault("DEVICE_FILTER", `""`) -} - func getEnvVarWithDefault(env, defaultValue string) string { val := os.Getenv(env) if val == "" { diff --git a/tests/framework/installer/installer.go b/tests/framework/installer/installer.go index 929b10e90..207f8b1ac 100644 --- a/tests/framework/installer/installer.go +++ b/tests/framework/installer/installer.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/coreos/pkg/capnslog" - "github.com/rook/rook/tests/framework/utils" + "github.com/rook/cassandra/tests/framework/utils" "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/errors" ) @@ -38,7 +38,7 @@ const ( ) var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "installer") + logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "installer") createArgs = []string{"create", "-f"} createFromStdinArgs = append(createArgs, "-") deleteArgs = []string{"delete", "-f"} diff --git a/tests/framework/installer/nfs_installer.go b/tests/framework/installer/nfs_installer.go deleted file mode 100644 index 6edfb5ac8..000000000 --- a/tests/framework/installer/nfs_installer.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package installer - -import ( - "context" - "fmt" - "testing" - - "github.com/rook/rook/tests/framework/utils" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - nfsServerCRD = "nfsservers.nfs.rook.io" -) - -type NFSInstaller struct { - k8shelper *utils.K8sHelper - manifests *NFSManifests - T func() *testing.T -} - -func NewNFSInstaller(k8shelper *utils.K8sHelper, t func() *testing.T) *NFSInstaller { - return &NFSInstaller{k8shelper, &NFSManifests{}, t} -} - -// InstallNFSServer installs NFS operator, NFS CRD instance and NFS volume -func (h *NFSInstaller) InstallNFSServer(systemNamespace, namespace string, count int) error { - h.k8shelper.CreateAnonSystemClusterBinding() - - // install hostpath provisioner if there isn't already a default storage class - storageClassName := "" - defaultExists, err := h.k8shelper.IsDefaultStorageClassPresent() - if err != nil { - return err - } else if !defaultExists { - if err := CreateHostPathPVs(h.k8shelper, 2, false, "2Mi"); err != nil { - return err - } - } else { - logger.Info("skipping install of host path provisioner because a default storage class already exists") - } - - // install nfs operator - if err := h.CreateNFSServerOperator(systemNamespace); err != nil { - return err - } - - // install nfs server instance - if err := h.CreateNFSServer(namespace, count, storageClassName); err != nil { - return err - } - - // install nfs server volume - if err := h.CreateNFSServerVolume(namespace); err != nil { - return err - } - - return nil -} - -// CreateNFSServerOperator creates nfs server in the provided namespace -func (h *NFSInstaller) CreateNFSServerOperator(namespace string) error { - logger.Infof("starting nfsserver operator") - - logger.Info("creating nfsserver CRDs") - if _, err := h.k8shelper.KubectlWithStdin(h.manifests.GetNFSServerCRDs(), createFromStdinArgs...); err != nil { - return err - } - - nfsOperator := h.manifests.GetNFSServerOperator(namespace) - _, err := h.k8shelper.KubectlWithStdin(nfsOperator, createFromStdinArgs...) 
- if err != nil { - return fmt.Errorf("failed to create rook-nfs-operator pod: %+v ", err) - } - - if !h.k8shelper.IsCRDPresent(nfsServerCRD) { - return fmt.Errorf("failed to find nfs CRD %s", nfsServerCRD) - } - - if !h.k8shelper.IsPodInExpectedState("rook-nfs-operator", namespace, "Running") { - return fmt.Errorf("rook-nfs-operator is not running, aborting") - } - - logger.Infof("nfs operator started") - return nil -} - -// CreateNFSServer creates the NFS Server CRD instance -func (h *NFSInstaller) CreateNFSServer(namespace string, count int, storageClassName string) error { - if err := h.k8shelper.CreateNamespace(namespace); err != nil { - return err - } - - logger.Infof("starting nfs server with kubectl and yaml") - nfsServer := h.manifests.GetNFSServer(namespace, count, storageClassName) - if _, err := h.k8shelper.KubectlWithStdin(nfsServer, createFromStdinArgs...); err != nil { - return fmt.Errorf("Failed to create nfs server: %+v ", err) - } - - if err := h.k8shelper.WaitForPodCount("app="+namespace, namespace, 1); err != nil { - logger.Errorf("nfs server pods in namespace %s not found", namespace) - return err - } - - err := h.k8shelper.WaitForLabeledPodsToRun("app="+namespace, namespace) - if err != nil { - logger.Errorf("nfs server pods in namespace %s are not running", namespace) - return err - } - - logger.Infof("nfs server started") - return nil -} - -// CreateNFSServerVolume creates NFS export PV and PVC -func (h *NFSInstaller) CreateNFSServerVolume(namespace string) error { - logger.Info("creating volume from nfs server in namespace %s", namespace) - - nfsServerPVC := h.manifests.GetNFSServerPVC(namespace) - - logger.Info("creating nfs server pvc") - if _, err := h.k8shelper.KubectlWithStdin(nfsServerPVC, createFromStdinArgs...); err != nil { - return err - } - - return nil -} - -// UninstallNFSServer uninstalls the NFS Server from the given namespace -func (h *NFSInstaller) UninstallNFSServer(systemNamespace, namespace string) { - ctx := context.TODO() - logger.Infof("uninstalling nfsserver from namespace %s", namespace) - - err := h.k8shelper.DeleteResource("pvc", "nfs-pv-claim") - checkError(h.T(), err, "cannot remove nfs pvc : nfs-pv-claim") - - err = h.k8shelper.DeleteResource("pvc", "nfs-pv-claim-bigger") - checkError(h.T(), err, "cannot remove nfs pvc : nfs-pv-claim-bigger") - - err = h.k8shelper.DeleteResource("pv", "nfs-pv") - checkError(h.T(), err, "cannot remove nfs pv : nfs-pv") - - err = h.k8shelper.DeleteResource("pv", "nfs-pv1") - checkError(h.T(), err, "cannot remove nfs pv : nfs-pv1") - - err = h.k8shelper.DeleteResource("-n", namespace, "nfsservers.nfs.rook.io", namespace) - checkError(h.T(), err, fmt.Sprintf("cannot remove nfsserver %s", namespace)) - - crdCheckerFunc := func() error { - _, err := h.k8shelper.RookClientset.NfsV1alpha1().NFSServers(namespace).Get(ctx, namespace, metav1.GetOptions{}) - return err - } - err = h.k8shelper.WaitForCustomResourceDeletion(namespace, namespace, crdCheckerFunc) - checkError(h.T(), err, fmt.Sprintf("failed to wait for crd %s deletion", namespace)) - - err = h.k8shelper.DeleteResource("namespace", namespace) - checkError(h.T(), err, fmt.Sprintf("cannot delete namespace %s", namespace)) - - logger.Infof("removing the operator from namespace %s", systemNamespace) - err = h.k8shelper.DeleteResource("crd", "nfsservers.nfs.rook.io") - checkError(h.T(), err, "cannot delete CRDs") - - nfsOperator := h.manifests.GetNFSServerOperator(systemNamespace) - _, err = h.k8shelper.KubectlWithStdin(nfsOperator, deleteFromStdinArgs...) 
- checkError(h.T(), err, "cannot uninstall rook-nfs-operator") - - err = DeleteHostPathPVs(h.k8shelper) - checkError(h.T(), err, "cannot uninstall hostpath provisioner") - - h.k8shelper.Clientset.RbacV1().ClusterRoleBindings().Delete(ctx, "anon-user-access", metav1.DeleteOptions{}) //nolint // asserting this failing in CI - h.k8shelper.Clientset.RbacV1().ClusterRoleBindings().Delete(ctx, "run-nfs-client-provisioner", metav1.DeleteOptions{}) //nolint // asserting this failing in CI - h.k8shelper.Clientset.RbacV1().ClusterRoles().Delete(ctx, "nfs-client-provisioner-runner", metav1.DeleteOptions{}) //nolint // asserting this failing in CI - logger.Infof("done removing the operator from namespace %s", systemNamespace) -} - -// GatherAllNFSServerLogs gathers all NFS Server logs -func (h *NFSInstaller) GatherAllNFSServerLogs(systemNamespace, namespace, testName string) { - if !h.T().Failed() && TestLogCollectionLevel() != "all" { - return - } - logger.Infof("Gathering all logs from NFSServer %s", namespace) - h.k8shelper.GetLogsFromNamespace(systemNamespace, testName, utils.TestEnvName()) - h.k8shelper.GetLogsFromNamespace(namespace, testName, utils.TestEnvName()) -} diff --git a/tests/framework/installer/nfs_manifests.go b/tests/framework/installer/nfs_manifests.go deleted file mode 100644 index b5a270271..000000000 --- a/tests/framework/installer/nfs_manifests.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package installer - -import ( - "strconv" - "strings" -) - -type NFSManifests struct { -} - -// GetNFSServerCRDs returns NFSServer CRD definition -func (n *NFSManifests) GetNFSServerCRDs() string { - manifest := readManifest("nfs", "crds.yaml") - logger.Info(manifest) - return manifest -} - -// GetNFSServerOperator returns the NFSServer operator definition -func (n *NFSManifests) GetNFSServerOperator(namespace string) string { - manifest := readManifest("nfs", "operator.yaml") - manifest = strings.ReplaceAll(manifest, "rook-nfs-system # namespace:operator", namespace) - return manifest -} - -// GetNFSServerPV returns NFSServer PV definition -func (n *NFSManifests) GetNFSServerPV(namespace string, clusterIP string) string { - return `apiVersion: v1 -kind: PersistentVolume -metadata: - name: nfs-pv - namespace: ` + namespace + ` - annotations: - volume.beta.kubernetes.io/mount-options: "vers=4.1" -spec: - storageClassName: nfs-sc - capacity: - storage: 1Mi - accessModes: - - ReadWriteMany - nfs: - server: ` + clusterIP + ` - path: "/test-claim" ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: nfs-pv1 - namespace: ` + namespace + ` - annotations: - volume.beta.kubernetes.io/mount-options: "vers=4.1" -spec: - storageClassName: nfs-sc - capacity: - storage: 2Mi - accessModes: - - ReadWriteMany - nfs: - server: ` + clusterIP + ` - path: "/test-claim1" -` -} - -// GetNFSServerPVC returns NFSServer PVC definition -func (n *NFSManifests) GetNFSServerPVC(namespace string) string { - return ` ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: nfs-ns-nfs-share -parameters: - exportName: nfs-share - nfsServerName: ` + namespace + ` - nfsServerNamespace: ` + namespace + ` -provisioner: nfs.rook.io/` + namespace + `-provisioner -reclaimPolicy: Delete -volumeBindingMode: Immediate ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: nfs-ns-nfs-share1 -parameters: - exportName: nfs-share1 - nfsServerName: ` + namespace + ` - nfsServerNamespace: ` + namespace + ` -provisioner: nfs.rook.io/` + namespace + `-provisioner -reclaimPolicy: Delete -volumeBindingMode: Immediate ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-pv-claim -spec: - storageClassName: nfs-ns-nfs-share - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-pv-claim-bigger -spec: - storageClassName: nfs-ns-nfs-share1 - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Mi -` -} - -// GetNFSServer returns NFSServer CRD instance definition -func (n *NFSManifests) GetNFSServer(namespace string, count int, storageClassName string) string { - return ` -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-server - namespace: ` + namespace + ` ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] - - apiGroups: [""] - resources: ["services", "endpoints"] - verbs: ["get"] - - apiGroups: ["extensions"] - resources: 
["podsecuritypolicies"] - resourceNames: ["nfs-provisioner"] - verbs: ["use"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: - - nfs.rook.io - resources: - - "*" - verbs: - - "*" ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-provisioner -subjects: - - kind: ServiceAccount - name: rook-nfs-server - namespace: ` + namespace + ` -roleRef: - kind: ClusterRole - name: rook-nfs-provisioner-runner - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: test-claim - namespace: ` + namespace + ` -spec: - storageClassName: ` + storageClassName + ` - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: test-claim1 - namespace: ` + namespace + ` -spec: - storageClassName: ` + storageClassName + ` - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Mi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: ` + namespace + ` - namespace: ` + namespace + ` -spec: - replicas: ` + strconv.Itoa(count) + ` - exports: - - name: nfs-share - server: - accessMode: ReadWrite - squash: "none" - persistentVolumeClaim: - claimName: test-claim - - name: nfs-share1 - server: - accessMode: ReadWrite - squash: "none" - persistentVolumeClaim: - claimName: test-claim1 -` -} diff --git a/tests/framework/installer/provisioners.go b/tests/framework/installer/provisioners.go index 9d2e24ca2..3f58fc78d 100644 --- a/tests/framework/installer/provisioners.go +++ b/tests/framework/installer/provisioners.go @@ -20,7 +20,7 @@ import ( "fmt" "io/ioutil" - "github.com/rook/rook/tests/framework/utils" + "github.com/rook/cassandra/tests/framework/utils" "k8s.io/apimachinery/pkg/api/errors" ) diff --git a/tests/framework/installer/settings.go b/tests/framework/installer/settings.go index 033cf957a..f09b0ff5b 100644 --- a/tests/framework/installer/settings.go +++ b/tests/framework/installer/settings.go @@ -17,14 +17,11 @@ limitations under the License. package installer import ( - "fmt" "io/ioutil" - "net/http" "path" - "time" "github.com/pkg/errors" - "github.com/rook/rook/tests/framework/utils" + "github.com/rook/cassandra/tests/framework/utils" ) func readManifest(provider, filename string) string { @@ -40,30 +37,3 @@ func readManifest(provider, filename string) string { } return string(contents) } - -func readManifestFromGithub(rookVersion, provider, filename string) string { - url := fmt.Sprintf("https://raw.githubusercontent.com/rook/rook/%s/cluster/examples/kubernetes/%s/%s", rookVersion, provider, filename) - logger.Infof("Retrieving manifest: %s", url) - var response *http.Response - var err error - for i := 1; i <= 3; i++ { - // #nosec G107 This is only test code and is expected to read from a url - response, err = http.Get(url) - if err != nil { - if i == 3 { - panic(errors.Wrapf(err, "failed to read manifest from %s", url)) - } - logger.Warningf("failed to read manifest from %s. retrying in 1sec. 
%v", url, err) - time.Sleep(time.Second) - continue - } - break - } - defer response.Body.Close() - - content, err := ioutil.ReadAll(response.Body) - if err != nil { - panic(errors.Wrapf(err, "failed to read content from %s", url)) - } - return string(content) -} diff --git a/tests/framework/utils/exec_utils.go b/tests/framework/utils/exec_utils.go index adc2d4b6b..6d587b687 100644 --- a/tests/framework/utils/exec_utils.go +++ b/tests/framework/utils/exec_utils.go @@ -24,10 +24,10 @@ import ( "strings" "github.com/coreos/pkg/capnslog" - utilexec "github.com/rook/rook/pkg/util/exec" + utilexec "github.com/rook/cassandra/pkg/util/exec" ) -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "testutil") +var logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "testutil") // CommandArgs is a warpper for cmd args type CommandArgs struct { diff --git a/tests/framework/utils/helm_helper.go b/tests/framework/utils/helm_helper.go index c9163af61..134190e81 100644 --- a/tests/framework/utils/helm_helper.go +++ b/tests/framework/utils/helm_helper.go @@ -18,13 +18,14 @@ package utils import ( "fmt" - "gopkg.in/yaml.v2" "os" "path" "path/filepath" + "gopkg.in/yaml.v2" + "github.com/pkg/errors" - "github.com/rook/rook/pkg/util/exec" + "github.com/rook/cassandra/pkg/util/exec" ) // HelmHelper is wrapper for running helm commands diff --git a/tests/framework/utils/k8s_helper.go b/tests/framework/utils/k8s_helper.go index 3fbbc847e..33a95567c 100644 --- a/tests/framework/utils/k8s_helper.go +++ b/tests/framework/utils/k8s_helper.go @@ -33,11 +33,9 @@ import ( "github.com/coreos/pkg/capnslog" "github.com/pkg/errors" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/ceph/cluster/crash" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" + rookclient "github.com/rook/cassandra/pkg/client/clientset/versioned" + "github.com/rook/cassandra/pkg/clusterd" + "github.com/rook/cassandra/pkg/util/exec" "github.com/stretchr/testify/require" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -107,7 +105,7 @@ func CreateK8sHelper(t func() *testing.T) (*K8sHelper, error) { } var ( - k8slogger = capnslog.NewPackageLogger("github.com/rook/rook", "utils") + k8slogger = capnslog.NewPackageLogger("github.com/rook/cassandra", "utils") cmd = getCmd() // RetryLoop params for tests. RetryLoop = TestRetryNumber() @@ -1702,35 +1700,6 @@ func (k8sh *K8sHelper) WaitForLabeledDeploymentsToBeReadyWithRetries(label, name return fmt.Errorf("giving up waiting for deployment(s) with label %s in namespace %s to be ready", label, namespace) } -func (k8sh *K8sHelper) WaitForCronJob(name, namespace string) error { - k8sVersion, err := k8sutil.GetK8SVersion(k8sh.Clientset) - if err != nil { - return errors.Wrap(err, "failed to get k8s version") - } - useCronJobV1 := k8sVersion.AtLeast(version.MustParseSemantic(crash.MinVersionForCronV1)) - for i := 0; i < RetryLoop; i++ { - var err error - if useCronJobV1 { - _, err = k8sh.Clientset.BatchV1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - } else { - _, err = k8sh.Clientset.BatchV1beta1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - } - if err != nil { - if kerrors.IsNotFound(err) { - logger.Infof("waiting for CronJob named %s in namespace %s", name, namespace) - time.Sleep(RetryInterval * time.Second) - continue - } - - return fmt.Errorf("failed to find CronJob named %s. 
%+v", name, err) - } - - logger.Infof("found CronJob with name %s in namespace %s", name, namespace) - return nil - } - return fmt.Errorf("giving up waiting for CronJob named %s in namespace %s", name, namespace) -} - func (k8sh *K8sHelper) GetResourceStatus(kind, name, namespace string) (string, error) { return k8sh.Kubectl("-n", namespace, "get", kind, name) // TODO: -o status } diff --git a/tests/integration/ceph_base_block_test.go b/tests/integration/ceph_base_block_test.go deleted file mode 100644 index a4e8e2ef9..000000000 --- a/tests/integration/ceph_base_block_test.go +++ /dev/null @@ -1,677 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "context" - "fmt" - "strconv" - "strings" - "testing" - "time" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func checkSkipCSITest(t *testing.T, k8sh *utils.K8sHelper) { - if !k8sh.VersionAtLeast("v1.14.0") { - logger.Info("Skipping tests as kube version is less than 1.14.0 for the CSI driver") - t.Skip() - } -} - -func skipSnapshotTest(k8sh *utils.K8sHelper) bool { - minVersion := "v1.17.0" - if !k8sh.VersionAtLeast(minVersion) { - logger.Infof("Skipping snapshot tests as kubernetes version is less than %q for the CSI driver", minVersion) - return true - } - return false -} - -func skipCloneTest(k8sh *utils.K8sHelper) bool { - minVersion := "v1.16.0" - if !k8sh.VersionAtLeast(minVersion) { - logger.Infof("Skipping snapshot tests as kubernetes version is less than %q for the CSI driver", minVersion) - return true - } - return false -} - -func blockCSICloneTest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, storageClassName string) { - // create pvc and app - pvcSize := "1Gi" - pvcName := "parent-pvc" - podName := "demo-pod" - readOnly := false - mountPoint := "/var/lib/test" - logger.Infof("create a PVC") - err := helper.BlockClient.CreatePVC(defaultNamespace, pvcName, storageClassName, "ReadWriteOnce", pvcSize) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, pvcName), "Make sure PVC is Bound") - - logger.Infof("bind PVC to application") - err = helper.BlockClient.CreatePod(podName, pvcName, defaultNamespace, mountPoint, readOnly) - assert.NoError(s.T(), err) - - logger.Infof("check pod is in running state") - require.True(s.T(), k8sh.IsPodRunning(podName, defaultNamespace), "make sure pod is in running state") - logger.Infof("Storage Mounted successfully") - - // write data to pvc get the checksum value - 
logger.Infof("write data to pvc") - cmd := fmt.Sprintf("dd if=/dev/zero of=%s/file.out bs=1MB count=10 status=none conv=fsync && md5sum %s/file.out", mountPoint, mountPoint) - resp, err := k8sh.RunCommandInPod(defaultNamespace, podName, cmd) - require.NoError(s.T(), err) - pvcChecksum := strings.Fields(resp) - require.Equal(s.T(), len(pvcChecksum), 2) - - clonePVCName := "clone-pvc" - logger.Infof("create a new pvc from pvc") - err = helper.BlockClient.CreatePVCClone(defaultNamespace, clonePVCName, pvcName, storageClassName, "ReadWriteOnce", pvcSize) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, clonePVCName), "Make sure PVC is Bound") - - clonePodName := "clone-pod" - logger.Infof("bind PVC clone to application") - err = helper.BlockClient.CreatePod(clonePodName, clonePVCName, defaultNamespace, mountPoint, readOnly) - assert.NoError(s.T(), err) - - logger.Infof("check pod is in running state") - require.True(s.T(), k8sh.IsPodRunning(clonePodName, defaultNamespace), "make sure pod is in running state") - logger.Infof("Storage Mounted successfully") - - // get the checksum of the data and validate it - logger.Infof("check md5sum of both pvc and clone data is same") - cmd = fmt.Sprintf("md5sum %s/file.out", mountPoint) - resp, err = k8sh.RunCommandInPod(defaultNamespace, clonePodName, cmd) - require.NoError(s.T(), err) - clonePVCChecksum := strings.Fields(resp) - require.Equal(s.T(), len(clonePVCChecksum), 2) - - // compare the checksum value and verify the values are equal - assert.Equal(s.T(), clonePVCChecksum[0], pvcChecksum[0]) - // delete clone PVC and app - logger.Infof("delete clone pod") - - err = k8sh.DeletePod(k8sutil.DefaultNamespace, clonePodName) - require.NoError(s.T(), err) - logger.Infof("delete clone pvc") - - err = helper.BlockClient.DeletePVC(defaultNamespace, clonePVCName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, clonePVCName)) - - // delete the parent PVC and app - err = k8sh.DeletePod(k8sutil.DefaultNamespace, podName) - require.NoError(s.T(), err) - logger.Infof("delete parent pvc") - - err = helper.BlockClient.DeletePVC(defaultNamespace, pvcName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, pvcName)) -} - -func blockCSISnapshotTest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, storageClassName, namespace string) { - logger.Infof("install snapshot CRD") - err := k8sh.CreateSnapshotCRD() - require.NoError(s.T(), err) - - logger.Infof("install snapshot controller") - err = k8sh.CreateSnapshotController() - require.NoError(s.T(), err) - - logger.Infof("check snapshot controller is running") - err = k8sh.WaitForSnapshotController(15) - require.NoError(s.T(), err) - // create snapshot class - snapshotDeletePolicy := "Delete" - snapshotClassName := "snapshot-testing" - logger.Infof("create snapshotclass") - err = helper.BlockClient.CreateSnapshotClass(snapshotClassName, snapshotDeletePolicy, namespace) - require.NoError(s.T(), err) - // create pvc and app - pvcSize := "1Gi" - pvcName := "snap-pvc" - podName := "demo-pod" - readOnly := false - mountPoint := "/var/lib/test" - logger.Infof("create a PVC") - err = helper.BlockClient.CreatePVC(defaultNamespace, pvcName, storageClassName, "ReadWriteOnce", pvcSize) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, pvcName), "Make sure PVC is Bound") - - logger.Infof("bind PVC to application") - err = 
helper.BlockClient.CreatePod(podName, pvcName, defaultNamespace, mountPoint, readOnly) - assert.NoError(s.T(), err) - - logger.Infof("check pod is in running state") - require.True(s.T(), k8sh.IsPodRunning(podName, defaultNamespace), "make sure pod is in running state") - logger.Infof("Storage Mounted successfully") - - // write data to pvc get the checksum value - logger.Infof("write data to pvc") - cmd := fmt.Sprintf("dd if=/dev/zero of=%s/file.out bs=1MB count=10 status=none conv=fsync && md5sum %s/file.out", mountPoint, mountPoint) - resp, err := k8sh.RunCommandInPod(defaultNamespace, podName, cmd) - require.NoError(s.T(), err) - pvcChecksum := strings.Fields(resp) - require.Equal(s.T(), len(pvcChecksum), 2) - // create a snapshot - snapshotName := "rbd-pvc-snapshot" - logger.Infof("create a snapshot from pvc") - err = helper.BlockClient.CreateSnapshot(snapshotName, pvcName, snapshotClassName, defaultNamespace) - require.NoError(s.T(), err) - restorePVCName := "restore-block-pvc" - // check snapshot is in ready state - ready, err := k8sh.CheckSnapshotISReadyToUse(snapshotName, defaultNamespace, 15) - require.NoError(s.T(), err) - require.True(s.T(), ready, "make sure snapshot is in ready state") - // create restore from snapshot and bind it to app - logger.Infof("restore pvc to a new snapshot") - err = helper.BlockClient.CreatePVCRestore(defaultNamespace, restorePVCName, snapshotName, storageClassName, "ReadWriteOnce", pvcSize) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, restorePVCName), "Make sure PVC is Bound") - - restorePodName := "restore-pod" - logger.Infof("bind PVC Restore to application") - err = helper.BlockClient.CreatePod(restorePodName, restorePVCName, defaultNamespace, mountPoint, readOnly) - assert.NoError(s.T(), err) - - logger.Infof("check pod is in running state") - require.True(s.T(), k8sh.IsPodRunning(restorePodName, defaultNamespace), "make sure pod is in running state") - logger.Infof("Storage Mounted successfully") - - // get the checksum of the data and validate it - logger.Infof("check md5sum of both pvc and restore data is same") - cmd = fmt.Sprintf("md5sum %s/file.out", mountPoint) - resp, err = k8sh.RunCommandInPod(defaultNamespace, restorePodName, cmd) - require.NoError(s.T(), err) - restorePVCChecksum := strings.Fields(resp) - require.Equal(s.T(), len(restorePVCChecksum), 2) - - // compare the checksum value and verify the values are equal - assert.Equal(s.T(), restorePVCChecksum[0], pvcChecksum[0]) - // delete clone PVC and app - logger.Infof("delete restore pod") - - err = k8sh.DeletePod(k8sutil.DefaultNamespace, restorePodName) - require.NoError(s.T(), err) - logger.Infof("delete restore pvc") - - err = helper.BlockClient.DeletePVC(defaultNamespace, restorePVCName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, restorePVCName)) - - // delete the snapshot - logger.Infof("delete snapshot") - - err = helper.BlockClient.DeleteSnapshot(snapshotName, pvcName, snapshotClassName, defaultNamespace) - require.NoError(s.T(), err) - logger.Infof("delete application pod") - - // delete the parent PVC and app - err = k8sh.DeletePod(k8sutil.DefaultNamespace, podName) - require.NoError(s.T(), err) - logger.Infof("delete parent pvc") - - err = helper.BlockClient.DeletePVC(defaultNamespace, pvcName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, pvcName)) - - logger.Infof("delete snapshotclass") - - err = 
helper.BlockClient.DeleteSnapshotClass(snapshotClassName, snapshotDeletePolicy, namespace) - require.NoError(s.T(), err) - logger.Infof("delete snapshot-controller") - - err = k8sh.DeleteSnapshotController() - require.NoError(s.T(), err) - logger.Infof("delete snapshot CRD") - - // remove snapshotcontroller and delete snapshot CRD - err = k8sh.DeleteSnapshotCRD() - require.NoError(s.T(), err) -} - -// Smoke Test for Block Storage - Test check the following operations on Block Storage in order -// Create,Mount,Write,Read,Expand,Unmount and Delete. -func runBlockCSITest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string) { - checkSkipCSITest(s.T(), k8sh) - - podName := "block-test" - poolName := "replicapool" - storageClassName := "rook-ceph-block" - blockName := "block-pv-claim" - podNameWithPVRetained := "block-test-retained" - poolNameRetained := "replicapoolretained" - storageClassNameRetained := "rook-ceph-block-retained" - blockNameRetained := "block-pv-claim-retained" - - clusterInfo := client.AdminClusterInfo(namespace) - defer blockTestDataCleanUp(helper, k8sh, s, clusterInfo, poolName, storageClassName, blockName, podName, true) - defer blockTestDataCleanUp(helper, k8sh, s, clusterInfo, poolNameRetained, storageClassNameRetained, blockNameRetained, podNameWithPVRetained, true) - logger.Infof("Block Storage End to End Integration Test - create, mount, write to, read from, and unmount") - logger.Infof("Running on Rook Cluster %s", namespace) - - logger.Infof("Step 0 : Get Initial List Block") - initBlockImages, _ := helper.BlockClient.ListAllImages(clusterInfo) - assert.Equal(s.T(), 0, len(initBlockImages), "there should not already be any images in the pool") - - logger.Infof("step 1: Create block storage") - err := helper.BlockClient.CreateStorageClassAndPVC(defaultNamespace, poolName, storageClassName, "Delete", blockName, "ReadWriteOnce") - require.NoError(s.T(), err) - require.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 1), "Make sure a new block is created") - err = helper.BlockClient.CreateStorageClassAndPVC(defaultNamespace, poolNameRetained, storageClassNameRetained, "Retain", blockNameRetained, "ReadWriteOnce") - require.NoError(s.T(), err) - require.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 2), "Make sure another new block is created") - logger.Infof("Block Storage created successfully") - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, blockName), "Make sure PVC is Bound") - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, blockNameRetained), "Make sure PVC with reclaimPolicy:Retain is Bound") - - logger.Infof("step 2: Mount block storage") - createPodWithBlock(helper, k8sh, s, namespace, storageClassName, podName, blockName) - createPodWithBlock(helper, k8sh, s, namespace, storageClassName, podNameWithPVRetained, blockNameRetained) - - logger.Infof("step 3: Write to block storage") - message := "Smoke Test Data for Block storage" - filename := "bsFile1" - err = k8sh.WriteToPod("", podName, filename, message) - assert.NoError(s.T(), err) - logger.Infof("Write to Block storage successfully") - - logger.Infof("step 4: Read from block storage") - err = k8sh.ReadFromPod("", podName, filename, message) - assert.NoError(s.T(), err) - logger.Infof("Read from Block storage successfully") - - logger.Infof("step 5: Restart the OSDs to confirm they are still healthy after restart") - restartOSDPods(k8sh, s, namespace) - - logger.Infof("step 6: Read from block storage again") - err = 
k8sh.ReadFromPod("", podName, filename, message) - assert.NoError(s.T(), err) - logger.Infof("Read from Block storage successfully") - - logger.Infof("step 7: Mount same block storage on a different pod. Should not be allowed") - otherPod := "block-test2" - err = helper.BlockClient.CreateClientPod(getCSIBlockPodDefinition(otherPod, blockName, defaultNamespace, storageClassName, false)) - assert.NoError(s.T(), err) - - // ** FIX: WHY IS THE RWO VOLUME NOT BEING FENCED??? The second pod is starting successfully with the same PVC - //require.True(s.T(), k8sh.IsPodInError(otherPod, defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure block-test2 pod errors out while mounting the volume") - //logger.Infof("Block Storage successfully fenced") - - logger.Infof("step 8: Delete fenced pod") - err = k8sh.DeletePod(k8sutil.DefaultNamespace, otherPod) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.IsPodTerminated(otherPod, defaultNamespace), "make sure block-test2 pod is terminated") - logger.Infof("Fenced pod deleted successfully") - - logger.Infof("step 9: Unmount block storage") - err = k8sh.DeletePod(k8sutil.DefaultNamespace, podName) - require.NoError(s.T(), err) - err = k8sh.DeletePod(k8sutil.DefaultNamespace, podNameWithPVRetained) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.IsPodTerminated(podName, defaultNamespace), "make sure block-test pod is terminated") - require.True(s.T(), k8sh.IsPodTerminated(podNameWithPVRetained, defaultNamespace), "make sure block-test-retained pod is terminated") - logger.Infof("Block Storage unmounted successfully") - - logger.Infof("step 10: Deleting block storage") - deletePVC(helper, k8sh, s, clusterInfo, blockName, "Delete") - deletePVC(helper, k8sh, s, clusterInfo, blockNameRetained, "Retain") - - logger.Infof("step 11: Delete storage classes and pools") - err = helper.PoolClient.DeletePool(helper.BlockClient, clusterInfo, poolName) - assert.NoError(s.T(), err) - err = helper.PoolClient.DeletePool(helper.BlockClient, clusterInfo, poolNameRetained) - assert.NoError(s.T(), err) - err = helper.BlockClient.DeleteStorageClass(storageClassName) - assert.NoError(s.T(), err) - err = helper.BlockClient.DeleteStorageClass(storageClassNameRetained) - assert.NoError(s.T(), err) -} - -func deletePVC(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, clusterInfo *client.ClusterInfo, pvcName, retainPolicy string) { - pvName, err := k8sh.GetPVCVolumeName(defaultNamespace, pvcName) - assert.NoError(s.T(), err) - pv, err := k8sh.GetPV(pvName) - require.NoError(s.T(), err) - logger.Infof("deleting ") - err = helper.BlockClient.DeletePVC(defaultNamespace, pvcName) - assert.NoError(s.T(), err) - - assert.Equal(s.T(), retainPolicy, string((*pv).Spec.PersistentVolumeReclaimPolicy)) - if retainPolicy == "Delete" { - assert.True(s.T(), retryPVCheck(k8sh, pvName, false, "")) - logger.Infof("PV: %s deleted successfully", pvName) - assert.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 1), "Make sure a block is deleted") - logger.Infof("Block Storage deleted successfully") - } else { - assert.True(s.T(), retryPVCheck(k8sh, pvName, true, "Released")) - assert.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 1), "Make sure a block is retained") - logger.Infof("Block Storage retained") - _, err = k8sh.Kubectl("delete", "pv", pvName) - assert.NoError(s.T(), err) - } -} - -func createPodWithBlock(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, clusterNamespace, storageClassName, 
podName, pvcName string) { - err := helper.BlockClient.CreateClientPod(getCSIBlockPodDefinition(podName, pvcName, defaultNamespace, storageClassName, false)) - assert.NoError(s.T(), err) - - require.True(s.T(), k8sh.IsPodRunning(podName, defaultNamespace), "make sure block-test pod is in running state") - logger.Infof("Block Storage Mounted successfully") -} - -func restartOSDPods(k8sh *utils.K8sHelper, s suite.Suite, namespace string) { - ctx := context.TODO() - osdLabel := "app=rook-ceph-osd" - - // Delete the osd pod(s) - logger.Infof("Deleting osd pod(s)") - pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: osdLabel}) - assert.NoError(s.T(), err) - - for _, pod := range pods.Items { - options := metav1.DeleteOptions{} - err := k8sh.Clientset.CoreV1().Pods(namespace).Delete(ctx, pod.Name, options) - assert.NoError(s.T(), err) - } - for _, pod := range pods.Items { - logger.Infof("Waiting for osd pod %s to be deleted", pod.Name) - deleted := k8sh.WaitUntilPodIsDeleted(pod.Name, namespace) - assert.True(s.T(), deleted) - } - - // Wait for the new pods to run - logger.Infof("Waiting for new osd pod to run") - err = k8sh.WaitForLabeledPodsToRun(osdLabel, namespace) - assert.NoError(s.T(), err) -} - -func runBlockCSITestLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings) { - checkSkipCSITest(s.T(), k8sh) - - logger.Infof("Block Storage End to End Integration Test - create storageclass,pool and pvc") - logger.Infof("Running on Rook Cluster %s", settings.Namespace) - clusterInfo := client.AdminClusterInfo(settings.Namespace) - poolName := "rookpool" - storageClassName := "rook-ceph-block-lite" - blockName := "test-block-claim-lite" - podName := "test-pod-lite" - defer blockTestDataCleanUp(helper, k8sh, s, clusterInfo, poolName, storageClassName, blockName, podName, true) - setupBlockLite(helper, k8sh, s, clusterInfo, poolName, storageClassName, blockName, podName) - if !skipSnapshotTest(k8sh) { - blockCSISnapshotTest(helper, k8sh, s, storageClassName, settings.Namespace) - } - - if !skipCloneTest(k8sh) { - blockCSICloneTest(helper, k8sh, s, storageClassName) - } -} - -func setupBlockLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, clusterInfo *client.ClusterInfo, - poolName, storageClassName, blockName, podName string) { - - // Check initial number of blocks - initialBlocks, err := helper.BlockClient.ListAllImages(clusterInfo) - require.NoError(s.T(), err) - initBlockCount := len(initialBlocks) - assert.Equal(s.T(), 0, initBlockCount, "why is there already a block image in the new pool?") - - logger.Infof("step : Create Pool,StorageClass and PVC") - - err = helper.BlockClient.CreateStorageClassAndPVC(defaultNamespace, poolName, storageClassName, "Delete", blockName, "ReadWriteOnce") - require.NoError(s.T(), err) - - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, blockName)) - - // Make sure new block is created - b, err := helper.BlockClient.ListAllImages(clusterInfo) - assert.NoError(s.T(), err) - assert.Equal(s.T(), 1, len(b), "Make sure new block image is created") - poolExists, err := helper.PoolClient.CephPoolExists(clusterInfo.Namespace, poolName) - assert.NoError(s.T(), err) - assert.True(s.T(), poolExists) -} - -func deleteBlockLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, clusterInfo *client.ClusterInfo, poolName, storageClassName, blockName string, requireBlockImagesRemoved bool) { - logger.Infof("deleteBlockLite: 
cleaning up after test") - // Delete pvc and storageclass - err := helper.BlockClient.DeletePVC(defaultNamespace, blockName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, blockName)) - if requireBlockImagesRemoved { - assert.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 0), "Make sure block images were deleted") - } - - err = helper.PoolClient.DeletePool(helper.BlockClient, clusterInfo, poolName) - assertNoErrorUnlessNotFound(s, err) - err = helper.BlockClient.DeleteStorageClass(storageClassName) - assertNoErrorUnlessNotFound(s, err) - - checkPoolDeleted(helper, s, clusterInfo.Namespace, poolName) -} - -func assertNoErrorUnlessNotFound(s suite.Suite, err error) { - if err == nil || errors.IsNotFound(err) { - return - } - assert.NoError(s.T(), err) -} - -func checkPoolDeleted(helper *clients.TestClient, s suite.Suite, namespace, name string) { - // only retry once to see if the pool was deleted - for i := 0; i < 3; i++ { - found, err := helper.PoolClient.CephPoolExists(namespace, name) - if err != nil { - // try again on failure since the pool may have been in an unexpected state while deleting - logger.Warningf("error getting pools. %+v", err) - } else if !found { - logger.Infof("pool %s is deleted", name) - return - } - logger.Infof("pool %s still exists", name) - time.Sleep(time.Second * utils.RetryInterval) - } - // this is not an assert in order to improve reliability of the tests - logger.Errorf("pool %s was not deleted", name) -} - -func blockTestDataCleanUp(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, clusterInfo *client.ClusterInfo, poolname, storageclassname, blockname, podName string, requireBlockImagesRemoved bool) { - logger.Infof("Cleaning up block storage") - err := k8sh.DeletePod(k8sutil.DefaultNamespace, podName) - if err != nil { - logger.Errorf("failed to delete pod. %v", err) - } - deleteBlockLite(helper, k8sh, s, clusterInfo, poolname, storageclassname, blockname, requireBlockImagesRemoved) -} - -// periodically checking if block image count has changed to expected value -// When creating pvc in k8s platform, it may take some time for the block Image to be bounded -func retryBlockImageCountCheck(helper *clients.TestClient, clusterInfo *client.ClusterInfo, expectedImageCount int) error { - for i := 0; i < utils.RetryLoop; i++ { - blockImages, err := helper.BlockClient.ListAllImages(clusterInfo) - if err != nil { - return err - } - if expectedImageCount == len(blockImages) { - return nil - } - logger.Infof("Waiting for block image count to reach %d. current=%d. 
%+v", expectedImageCount, len(blockImages), blockImages) - time.Sleep(time.Second * utils.RetryInterval) - } - return fmt.Errorf("timed out waiting for image count to reach %d", expectedImageCount) -} - -func retryPVCheck(k8sh *utils.K8sHelper, name string, exists bool, status string) bool { - for i := 0; i < utils.RetryLoop; i++ { - pv, err := k8sh.GetPV(name) - if err != nil { - if !exists { - return true - } - } - if exists { - if string((*pv).Status.Phase) == status { - return true - } - } - logger.Infof("Waiting for PV %q to have status %q with exists %t", name, status, exists) - time.Sleep(time.Second * utils.RetryInterval) - } - return false -} - -func getCSIBlockPodDefinition(podName, pvcName, namespace, storageClass string, readOnly bool) string { - return ` -apiVersion: v1 -kind: Pod -metadata: - name: ` + podName + ` - namespace: ` + namespace + ` -spec: - containers: - - name: ` + podName + ` - image: busybox - command: - - sh - - "-c" - - "touch ` + utils.TestMountPath + `/csi.test && sleep 3600" - imagePullPolicy: IfNotPresent - env: - volumeMounts: - - mountPath: ` + utils.TestMountPath + ` - name: csivol - volumes: - - name: csivol - persistentVolumeClaim: - claimName: ` + pvcName + ` - readOnly: ` + strconv.FormatBool(readOnly) + ` - restartPolicy: Never -` -} - -func getBlockStatefulSetAndServiceDefinition(namespace, statefulsetName, podName, StorageClassName string) (*v1.Service, *appsv1.StatefulSet) { - service := &v1.Service{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: statefulsetName, - Namespace: namespace, - Labels: map[string]string{ - "app": statefulsetName, - }, - }, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: statefulsetName, - Port: 80, - }, - }, - ClusterIP: "None", - Selector: map[string]string{ - "app": statefulsetName, - }, - }, - } - - var replica int32 = 1 - - labels := map[string]string{ - "app": statefulsetName, - } - - statefulSet := &appsv1.StatefulSet{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "apps/v1", - Kind: "StatefulSet", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: namespace, - }, - Spec: appsv1.StatefulSetSpec{ - ServiceName: statefulsetName, - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Replicas: &replica, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: statefulsetName, - Image: "busybox", - Command: []string{"sleep", "3600"}, - Ports: []v1.ContainerPort{ - { - ContainerPort: 80, - Name: podName, - }, - }, - VolumeMounts: []v1.VolumeMount{ - { - Name: "rookpvc", - MountPath: "/tmp/rook", - }, - }, - }, - }, - }, - }, - VolumeClaimTemplates: []v1.PersistentVolumeClaim{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "rookpvc", - Annotations: map[string]string{ - "volume.beta.kubernetes.io/storage-class": StorageClassName, - }, - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceStorage: *resource.NewQuantity(1.0, resource.BinarySI), - }, - }, - }, - }, - }, - }, - } - - return service, statefulSet -} diff --git a/tests/integration/ceph_base_deploy_test.go b/tests/integration/ceph_base_deploy_test.go deleted file mode 100644 index 204de0e86..000000000 --- a/tests/integration/ceph_base_deploy_test.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2016 The Rook Authors. 
All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "fmt" - "os" - "strings" - "time" - - "testing" - - "github.com/coreos/pkg/capnslog" - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -const ( - defaultNamespace = "default" - // UPDATE these versions when the integration test matrix changes - // These versions are for running a minimal test suite for more efficient tests across different versions of K8s - // instead of running all suites on all versions - // To run on multiple versions, add a comma separate list such as 1.16.0,1.17.0 - flexDriverMinimalTestVersion = "1.15.0" - cephMasterSuiteMinimalTestVersion = "1.16.0" - multiClusterMinimalTestVersion = "1.16.0" - helmMinimalTestVersion = "1.17.0" - upgradeMinimalTestVersion = "1.18.0" - smokeSuiteMinimalTestVersion = "1.19.0" -) - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "integrationTest") -) - -// Test to make sure all rook components are installed and Running -func checkIfRookClusterIsInstalled(s suite.Suite, k8sh *utils.K8sHelper, opNamespace, clusterNamespace string, mons int) { - logger.Infof("Make sure all Pods in Rook Cluster %s are running", clusterNamespace) - assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-operator", opNamespace, 1, "Running"), - "Make sure there is 1 rook-operator present in Running state") - assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-mgr", clusterNamespace, 1, "Running"), - "Make sure there is 1 rook-ceph-mgr present in Running state") - assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-osd", clusterNamespace, 1, "Running"), - "Make sure there is at lest 1 rook-ceph-osd present in Running state") - assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-mon", clusterNamespace, mons, "Running"), - fmt.Sprintf("Make sure there are %d rook-ceph-mon present in Running state", mons)) - assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-crashcollector", clusterNamespace, 1, "Running"), - "Make sure there is at lest 1 rook-ceph-crash present in Running state") -} - -func checkIfRookClusterIsHealthy(s suite.Suite, testClient *clients.TestClient, clusterNamespace string) { - logger.Infof("Testing cluster %s health", clusterNamespace) - var err error - - retryCount := 0 - for retryCount < utils.RetryLoop { - healthy, err := clients.IsClusterHealthy(testClient, clusterNamespace) - if healthy { - logger.Infof("cluster %s is healthy", clusterNamespace) - return - } - - retryCount++ - logger.Infof("waiting for cluster %s to become healthy. 
err: %+v", clusterNamespace, err) - <-time.After(time.Duration(utils.RetryInterval) * time.Second) - } - - require.Nil(s.T(), err) -} - -func HandlePanics(r interface{}, uninstaller func(), t func() *testing.T) { - if r != nil { - logger.Infof("unexpected panic occurred during test %s, --> %v", t().Name(), r) - t().Fail() - uninstaller() - t().FailNow() - } -} - -func checkIfShouldRunForMinimalTestMatrix(t func() *testing.T, k8sh *utils.K8sHelper, version string) { - testArgs := os.Getenv("TEST_ARGUMENTS") - if !strings.Contains(testArgs, "min-test-matrix") { - logger.Infof("running all tests") - return - } - versions := strings.Split(version, ",") - logger.Infof("checking if tests are running on k8s %q", version) - matchedVersion := false - kubeVersion := "" - for _, v := range versions { - kubeVersion, matchedVersion = k8sh.VersionMinorMatches(v) - if matchedVersion { - break - } - } - if !matchedVersion { - logger.Infof("Skipping test suite since kube version %q does not match", kubeVersion) - t().Skip() - } - logger.Infof("Running test suite since kube version is %q", kubeVersion) -} - -// StartTestCluster creates new instance of TestCephSettings struct -func StartTestCluster(t func() *testing.T, settings *installer.TestCephSettings, minimalMatrixK8sVersion string) (*installer.CephInstaller, *utils.K8sHelper) { - k8shelper, err := utils.CreateK8sHelper(t) - require.NoError(t(), err) - checkIfShouldRunForMinimalTestMatrix(t, k8shelper, minimalMatrixK8sVersion) - - // Turn on DEBUG logging - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - - installer := installer.NewCephInstaller(t, k8shelper.Clientset, settings) - isRookInstalled, err := installer.InstallRook() - - if !isRookInstalled || err != nil { - logger.Errorf("Rook was not installed successfully: %v", err) - if !installer.T().Failed() { - installer.GatherAllRookLogs(t().Name(), settings.Namespace, settings.OperatorNamespace) - } - t().Fail() - installer.UninstallRook() - t().FailNow() - } - - return installer, k8shelper -} diff --git a/tests/integration/ceph_base_file_test.go b/tests/integration/ceph_base_file_test.go deleted file mode 100644 index f2acba483..000000000 --- a/tests/integration/ceph_base_file_test.go +++ /dev/null @@ -1,593 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package integration - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/rook/rook/pkg/daemon/ceph/client" - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - filePodName = "file-test" - fileMountUserPodName = "file-mountuser-test" - fileMountUser = "filemountuser" - fileMountSecret = "file-mountuser-cephkey" //nolint:gosec // We safely suppress gosec in tests file -) - -func fileSystemCSICloneTest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, storageClassName, systemNamespace string) { - // create pvc and app - pvcSize := "1Gi" - pvcName := "parent-pvc" - podName := "demo-pod" - readOnly := false - mountPoint := "/var/lib/test" - logger.Infof("create a PVC") - err := helper.FSClient.CreatePVC(defaultNamespace, pvcName, storageClassName, "ReadWriteOnce", pvcSize) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, pvcName), "Make sure PVC is Bound") - - logger.Infof("bind PVC to application") - err = helper.FSClient.CreatePod(podName, pvcName, defaultNamespace, mountPoint, readOnly) - assert.NoError(s.T(), err) - - logger.Infof("check pod is in running state") - require.True(s.T(), k8sh.IsPodRunning(podName, defaultNamespace), "make sure pod is in running state") - logger.Infof("Storage Mounted successfully") - - // write data to pvc get the checksum value - logger.Infof("write data to pvc") - cmd := fmt.Sprintf("dd if=/dev/zero of=%s/file.out bs=1MB count=10 status=none conv=fsync && md5sum %s/file.out", mountPoint, mountPoint) - resp, err := k8sh.RunCommandInPod(defaultNamespace, podName, cmd) - require.NoError(s.T(), err) - pvcChecksum := strings.Fields(resp) - require.Equal(s.T(), len(pvcChecksum), 2) - - clonePVCName := "clone-pvc" - logger.Infof("create a new pvc from pvc") - err = helper.FSClient.CreatePVCClone(defaultNamespace, clonePVCName, pvcName, storageClassName, "ReadWriteOnce", pvcSize) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, clonePVCName), "Make sure PVC is Bound") - - clonePodName := "clone-pod" - logger.Infof("bind PVC clone to application") - err = helper.FSClient.CreatePod(clonePodName, clonePVCName, defaultNamespace, mountPoint, readOnly) - assert.NoError(s.T(), err) - - logger.Infof("check pod is in running state") - require.True(s.T(), k8sh.IsPodRunning(clonePodName, defaultNamespace), "make sure pod is in running state") - logger.Infof("Storage Mounted successfully") - - // get the checksum of the data and validate it - logger.Infof("check md5sum of both pvc and clone data is same") - cmd = fmt.Sprintf("md5sum %s/file.out", mountPoint) - resp, err = k8sh.RunCommandInPod(defaultNamespace, clonePodName, cmd) - require.NoError(s.T(), err) - clonePVCChecksum := strings.Fields(resp) - require.Equal(s.T(), len(clonePVCChecksum), 2) - - // compare the checksum value and verify the values are equal - assert.Equal(s.T(), clonePVCChecksum[0], pvcChecksum[0]) - // delete clone PVC and app - logger.Infof("delete clone pod") - - err = k8sh.DeletePod(k8sutil.DefaultNamespace, clonePodName) - require.NoError(s.T(), err) - logger.Infof("delete 
clone pvc") - - err = helper.FSClient.DeletePVC(defaultNamespace, clonePVCName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, clonePVCName)) - - // delete the parent PVC and app - err = k8sh.DeletePod(k8sutil.DefaultNamespace, podName) - require.NoError(s.T(), err) - logger.Infof("delete parent pvc") - - err = helper.FSClient.DeletePVC(defaultNamespace, pvcName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, pvcName)) -} - -func fileSystemCSISnapshotTest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, storageClassName, namespace string) { - logger.Infof("install snapshot CRD") - err := k8sh.CreateSnapshotCRD() - require.NoError(s.T(), err) - - logger.Infof("install snapshot controller") - err = k8sh.CreateSnapshotController() - require.NoError(s.T(), err) - - logger.Infof("check snapshot controller is running") - err = k8sh.WaitForSnapshotController(15) - require.NoError(s.T(), err) - // create snapshot class - snapshotDeletePolicy := "Delete" - snapshotClassName := "snapshot-testing" - logger.Infof("create snapshotclass") - err = helper.FSClient.CreateSnapshotClass(snapshotClassName, snapshotDeletePolicy, namespace) - require.NoError(s.T(), err) - // create pvc and app - pvcSize := "1Gi" - pvcName := "snap-pvc" - podName := "demo-pod" - readOnly := false - mountPoint := "/var/lib/test" - logger.Infof("create a PVC") - err = helper.FSClient.CreatePVC(defaultNamespace, pvcName, storageClassName, "ReadWriteOnce", pvcSize) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, pvcName), "Make sure PVC is Bound") - - logger.Infof("bind PVC to application") - err = helper.FSClient.CreatePod(podName, pvcName, defaultNamespace, mountPoint, readOnly) - assert.NoError(s.T(), err) - - logger.Infof("check pod is in running state") - require.True(s.T(), k8sh.IsPodRunning(podName, defaultNamespace), "make sure pod is in running state") - logger.Infof("Storage Mounted successfully") - - // write data to pvc get the checksum value - logger.Infof("write data to pvc") - cmd := fmt.Sprintf("dd if=/dev/zero of=%s/file.out bs=1MB count=10 status=none conv=fsync && md5sum %s/file.out", mountPoint, mountPoint) - resp, err := k8sh.RunCommandInPod(defaultNamespace, podName, cmd) - require.NoError(s.T(), err) - pvcChecksum := strings.Fields(resp) - require.Equal(s.T(), len(pvcChecksum), 2) - // create a snapshot - snapshotName := "rbd-pvc-snapshot" - logger.Infof("create a snapshot from pvc") - err = helper.FSClient.CreateSnapshot(snapshotName, pvcName, snapshotClassName, defaultNamespace) - require.NoError(s.T(), err) - restorePVCName := "restore-block-pvc" - // check snapshot is in ready state - ready, err := k8sh.CheckSnapshotISReadyToUse(snapshotName, defaultNamespace, 15) - require.NoError(s.T(), err) - require.True(s.T(), ready, "make sure snapshot is in ready state") - // create restore from snapshot and bind it to app - logger.Infof("restore pvc to a new snapshot") - err = helper.FSClient.CreatePVCRestore(defaultNamespace, restorePVCName, snapshotName, storageClassName, "ReadWriteOnce", pvcSize) - require.NoError(s.T(), err) - require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, restorePVCName), "Make sure PVC is Bound") - - restorePodName := "restore-pod" - logger.Infof("bind PVC Restore to application") - err = helper.FSClient.CreatePod(restorePodName, restorePVCName, defaultNamespace, mountPoint, readOnly) - 
assert.NoError(s.T(), err) - - logger.Infof("check pod is in running state") - require.True(s.T(), k8sh.IsPodRunning(restorePodName, defaultNamespace), "make sure pod is in running state") - logger.Infof("Storage Mounted successfully") - - // get the checksum of the data and validate it - logger.Infof("check md5sum of both pvc and restore data is same") - cmd = fmt.Sprintf("md5sum %s/file.out", mountPoint) - resp, err = k8sh.RunCommandInPod(defaultNamespace, restorePodName, cmd) - require.NoError(s.T(), err) - restorePVCChecksum := strings.Fields(resp) - require.Equal(s.T(), len(restorePVCChecksum), 2) - - // compare the checksum value and verify the values are equal - assert.Equal(s.T(), restorePVCChecksum[0], pvcChecksum[0]) - // delete clone PVC and app - logger.Infof("delete restore pod") - - err = k8sh.DeletePod(k8sutil.DefaultNamespace, restorePodName) - require.NoError(s.T(), err) - logger.Infof("delete restore pvc") - - err = helper.FSClient.DeletePVC(defaultNamespace, restorePVCName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, restorePVCName)) - - // delete the snapshot - logger.Infof("delete snapshot") - - err = helper.FSClient.DeleteSnapshot(snapshotName, pvcName, snapshotClassName, defaultNamespace) - require.NoError(s.T(), err) - logger.Infof("delete application pod") - - // delete the parent PVC and app - err = k8sh.DeletePod(k8sutil.DefaultNamespace, podName) - require.NoError(s.T(), err) - logger.Infof("delete parent pvc") - - err = helper.FSClient.DeletePVC(defaultNamespace, pvcName) - assertNoErrorUnlessNotFound(s, err) - assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, pvcName)) - - logger.Infof("delete snapshotclass") - - err = helper.FSClient.DeleteSnapshotClass(snapshotClassName, snapshotDeletePolicy, namespace) - require.NoError(s.T(), err) - logger.Infof("delete snapshot-controller") - - err = k8sh.DeleteSnapshotController() - require.NoError(s.T(), err) - logger.Infof("delete snapshot CRD") - - // remove snapshotcontroller and delete snapshot CRD - err = k8sh.DeleteSnapshotCRD() - require.NoError(s.T(), err) -} - -// Smoke Test for File System Storage - Test check the following operations on Filesystem Storage in order -// Create,Mount,Write,Read,Unmount and Delete. 
-func runFileE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, filesystemName string, preserveFilesystemOnDelete bool) { - if settings.UseCSI { - checkSkipCSITest(s.T(), k8sh) - } - - defer fileTestDataCleanUp(helper, k8sh, s, filePodName, settings.Namespace, filesystemName) - logger.Infof("Running on Rook Cluster %s", settings.Namespace) - logger.Infof("File Storage End To End Integration Test - create, mount, write to, read from, and unmount") - activeCount := 2 - createFilesystem(helper, k8sh, s, settings, filesystemName, activeCount) - - if preserveFilesystemOnDelete { - _, err := k8sh.Kubectl("-n", settings.Namespace, "patch", "CephFilesystem", filesystemName, "--type=merge", "-p", `{"spec": {"preserveFilesystemOnDelete": true}}`) - assert.NoError(s.T(), err) - } - - // Create a test pod where CephFS is consumed without user creds - storageClassName := "cephfs-storageclass" - err := helper.FSClient.CreateStorageClass(filesystemName, settings.OperatorNamespace, settings.Namespace, storageClassName) - assert.NoError(s.T(), err) - createFilesystemConsumerPod(helper, k8sh, s, settings, filesystemName, storageClassName) - - // Test reading and writing to the first pod - err = writeAndReadToFilesystem(helper, k8sh, s, settings.Namespace, filePodName, "test_file") - assert.NoError(s.T(), err) - - // TODO: Also mount with user credentials with the CSI driver - if !settings.UseCSI { - // Create a test pod where CephFS is consumed with a mountUser and mountSecret specified. - createFilesystemMountCephCredentials(helper, k8sh, s, settings, filesystemName) - createFilesystemMountUserConsumerPod(helper, k8sh, s, settings, filesystemName, storageClassName) - - // Test reading and writing to the second pod - err = writeAndReadToFilesystem(helper, k8sh, s, settings.Namespace, fileMountUserPodName, "canttouchthis") - assert.Error(s.T(), err, "we should not be able to write to file canttouchthis on CephFS `/`") - err = writeAndReadToFilesystem(helper, k8sh, s, settings.Namespace, fileMountUserPodName, "foo/test_file") - assert.NoError(s.T(), err, "we should be able to write to the `/foo` directory on CephFS") - - cleanupFilesystemConsumer(helper, k8sh, s, settings.Namespace, fileMountUserPodName) - assert.NoError(s.T(), err) - } - - // Start the NFS daemons - testNFSDaemons(helper, k8sh, s, settings, filesystemName) - - // Cleanup the filesystem and its clients - cleanupFilesystemConsumer(helper, k8sh, s, settings.Namespace, filePodName) - assert.NoError(s.T(), err) - downscaleMetadataServers(helper, k8sh, s, settings.Namespace, filesystemName) - cleanupFilesystem(helper, k8sh, s, settings.Namespace, filesystemName) - err = helper.FSClient.DeleteStorageClass(storageClassName) - assertNoErrorUnlessNotFound(s, err) - - if preserveFilesystemOnDelete { - fses, err := helper.FSClient.List(settings.Namespace) - assert.NoError(s.T(), err) - assert.Len(s.T(), fses, 1) - assert.Equal(s.T(), fses[0].Name, filesystemName) - - err = helper.FSClient.Delete(filesystemName, settings.Namespace) - assert.NoError(s.T(), err) - } -} - -func testNFSDaemons(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, filesystemName string) { - name := "my-nfs" - err := helper.NFSClient.Create(settings.Namespace, name, filesystemName+"-data0", 2) - require.Nil(s.T(), err) - - err = helper.NFSClient.Delete(settings.Namespace, name) - assert.Nil(s.T(), err) -} - -func createFilesystemConsumerPod(helper *clients.TestClient, 
k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, filesystemName, storageClassName string) { - err := createPodWithFilesystem(k8sh, s, settings, filePodName, filesystemName, storageClassName, false) - require.NoError(s.T(), err) - filePodRunning := k8sh.IsPodRunning(filePodName, settings.Namespace) - require.True(s.T(), filePodRunning, "make sure file-test pod is in running state") - logger.Infof("File system mounted successfully") -} - -func writeAndReadToFilesystem(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace, podName, filename string) error { - logger.Infof("Write to file system") - message := "Test Data for file system storage" - if err := k8sh.WriteToPod(namespace, podName, filename, message); err != nil { - return err - } - - return k8sh.ReadFromPod(namespace, podName, filename, message) -} - -func downscaleMetadataServers(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace, fsName string) { - logger.Infof("downscaling file system metadata servers") - err := helper.FSClient.ScaleDown(fsName, namespace) - require.Nil(s.T(), err) -} - -func cleanupFilesystemConsumer(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, podName string) { - logger.Infof("Delete file System consumer") - err := k8sh.DeletePod(namespace, podName) - assert.Nil(s.T(), err) - if !k8sh.IsPodTerminated(podName, namespace) { - k8sh.PrintPodDescribe(namespace, podName) - assert.Fail(s.T(), fmt.Sprintf("make sure %s pod is terminated", podName)) - } - err = helper.FSClient.DeletePVC(namespace, podName) - assertNoErrorUnlessNotFound(s, err) - logger.Infof("File system consumer deleted") -} - -// cleanupFilesystem cleans up the filesystem and checks if all mds pods are terminated before continuing -func cleanupFilesystem(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, filesystemName string) { - logger.Infof("Deleting file system") - err := helper.FSClient.Delete(filesystemName, namespace) - assert.Nil(s.T(), err) - logger.Infof("File system %s deleted", filesystemName) -} - -// Test File System Creation on Rook that was installed on a custom namespace i.e. 
Namespace != "rook" and delete it again -func runFileE2ETestLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, filesystemName string) { - checkSkipCSITest(s.T(), k8sh) - logger.Infof("File Storage End to End Integration Test - create Filesystem and make sure mds pod is running") - logger.Infof("Running on Rook Cluster %s", settings.Namespace) - activeCount := 1 - createFilesystem(helper, k8sh, s, settings, filesystemName, activeCount) - // Create a test pod where CephFS is consumed without user creds - storageClassName := "cephfs-storageclass" - err := helper.FSClient.CreateStorageClass(filesystemName, settings.OperatorNamespace, settings.Namespace, storageClassName) - assert.NoError(s.T(), err) - assert.NoError(s.T(), err) - if !skipSnapshotTest(k8sh) { - fileSystemCSISnapshotTest(helper, k8sh, s, storageClassName, settings.Namespace) - } - - if !skipCloneTest(k8sh) { - fileSystemCSICloneTest(helper, k8sh, s, storageClassName, settings.Namespace) - } - cleanupFilesystem(helper, k8sh, s, settings.Namespace, filesystemName) - err = helper.FSClient.DeleteStorageClass(storageClassName) - assertNoErrorUnlessNotFound(s, err) -} - -func createFilesystem(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, filesystemName string, activeCount int) { - logger.Infof("Create file System") - fscErr := helper.FSClient.Create(filesystemName, settings.Namespace, activeCount) - require.Nil(s.T(), fscErr) - logger.Infof("File system %s created", filesystemName) - - filesystemList, _ := helper.FSClient.List(settings.Namespace) - require.Equal(s.T(), 1, len(filesystemList), "There should be one shared file system present") -} - -func fileTestDataCleanUp(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, podName string, namespace string, filesystemName string) { - logger.Infof("Cleaning up file system") - err := k8sh.DeletePod(namespace, podName) - assert.NoError(s.T(), err) - err = helper.FSClient.Delete(filesystemName, namespace) - assert.NoError(s.T(), err) -} - -func createPodWithFilesystem(k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, podName, filesystemName, storageClassName string, mountUser bool) error { - var testPodManifest string - if settings.UseCSI { - testPodManifest = getFilesystemCSITestPod(settings, podName, storageClassName) - } else { - testPodManifest = getFilesystemFlexTestPod(settings, podName, filesystemName, mountUser) - } - if err := k8sh.ResourceOperation("create", testPodManifest); err != nil { - return fmt.Errorf("failed to create pod -- %s. 
%+v", testPodManifest, err) - } - return nil -} - -func getFilesystemFlexTestPod(settings *installer.TestCephSettings, podName, filesystemName string, mountUser bool) string { - mountUserInsert := "" - if mountUser { - mountUserInsert = ` - mountUser: ` + fileMountUser + ` - mountSecret: ` + fileMountSecret - } - // Bash's sleep signal handling: http://mywiki.wooledge.org/SignalTrap#When_is_the_signal_handled.3F - return `apiVersion: v1 -kind: Pod -metadata: - name: ` + podName + ` - namespace: ` + settings.Namespace + ` -spec: - containers: - - name: ` + podName + ` - image: krallin/ubuntu-tini - command: - - "/usr/local/bin/tini" - - "-g" - - "--" - - "sleep" - - "1800" - imagePullPolicy: IfNotPresent - env: - volumeMounts: - - mountPath: "` + utils.TestMountPath + `" - name: ` + filesystemName + ` - volumes: - - name: ` + filesystemName + ` - flexVolume: - driver: ceph.rook.io/` + settings.OperatorNamespace + ` - fsType: ceph - options: - fsName: ` + filesystemName + ` - clusterNamespace: ` + settings.Namespace + mountUserInsert + ` - restartPolicy: Always -` -} - -func getFilesystemCSITestPod(settings *installer.TestCephSettings, podName, storageClassName string) string { - claimName := podName - return ` -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ` + claimName + ` - namespace: ` + settings.Namespace + ` -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ` + storageClassName + ` ---- -apiVersion: v1 -kind: Pod -metadata: - name: ` + podName + ` - namespace: ` + settings.Namespace + ` -spec: - containers: - - name: ` + podName + ` - image: busybox - command: - - sh - - "-c" - - "touch ` + utils.TestMountPath + `/csi.test && sleep 3600" - imagePullPolicy: IfNotPresent - env: - volumeMounts: - - mountPath: ` + utils.TestMountPath + ` - name: csivol - volumes: - - name: csivol - persistentVolumeClaim: - claimName: ` + claimName + ` - readOnly: false - restartPolicy: Never -` -} - -func createFilesystemMountCephCredentials(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, filesystemName string) { - ctx := context.TODO() - // Create agent binding for access to Secrets - err := k8sh.ResourceOperation("apply", getFilesystemAgentMountSecretsBinding(settings.Namespace)) - require.Nil(s.T(), err) - // Mount CephFS in toolbox and create /foo directory on it - logger.Info("Creating /foo directory on CephFS") - _, err = k8sh.ExecRemote(settings.Namespace, "mkdir", []string{"-p", utils.TestMountPath}) - require.Nil(s.T(), err) - _, err = k8sh.ExecRemoteWithRetry(10, settings.Namespace, "bash", []string{"-c", fmt.Sprintf("mount -t ceph -o mds_namespace=%s,name=admin,secret=$(grep key /etc/ceph/keyring | awk '{print $3}') $(grep mon_host /etc/ceph/ceph.conf | awk '{print $3}'):/ %s", filesystemName, utils.TestMountPath)}) - require.Nil(s.T(), err) - _, err = k8sh.ExecRemote(settings.Namespace, "mkdir", []string{"-p", fmt.Sprintf("%s/foo", utils.TestMountPath)}) - require.Nil(s.T(), err) - _, err = k8sh.ExecRemote(settings.Namespace, "umount", []string{utils.TestMountPath}) - require.Nil(s.T(), err) - logger.Info("Created /foo directory on CephFS") - - // Create Ceph credentials which allow CephFS access to `/foo` but not `/`. 
- commandArgs := []string{ - "-c", - fmt.Sprintf( - `ceph auth get-or-create-key client.%s mon "allow r" osd "allow rw pool=%s-data0" mds "allow r, allow rw path=/foo"`, - fileMountUser, - filesystemName, - ), - } - logger.Infof("ceph credentials command args: %s", commandArgs[1]) - result, err := k8sh.ExecRemote(settings.Namespace, "bash", commandArgs) - logger.Infof("Ceph filesystem credentials output: %s", result) - logger.Info("Created Ceph credentials") - require.Nil(s.T(), err) - // Save Ceph credentials to Kubernetes - _, err = k8sh.Clientset.CoreV1().Secrets(settings.Namespace).Create(ctx, &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: fileMountSecret, - Namespace: settings.Namespace, - }, - Data: map[string][]byte{ - "mykey": []byte(result), - }, - }, metav1.CreateOptions{}) - require.Nil(s.T(), err) - logger.Info("Created Ceph credentials Secret in Kubernetes") -} - -func createFilesystemMountUserConsumerPod(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, filesystemName, storageClassName string) { - // TODO: Mount with user credentials for the CSI driver - mtfsErr := createPodWithFilesystem(k8sh, s, settings, fileMountUserPodName, filesystemName, storageClassName, true) - require.Nil(s.T(), mtfsErr) - filePodRunning := k8sh.IsPodRunning(fileMountUserPodName, settings.Namespace) - require.True(s.T(), filePodRunning, "make sure file-mountuser-test pod is in running state") - logger.Infof("File system mounted successfully") -} - -func getFilesystemAgentMountSecretsBinding(namespace string) string { - return `apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-ceph-agent-mount - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-agent-mount -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: ` + namespace + ` -` -} - -func waitForFilesystemActive(k8sh *utils.K8sHelper, clusterInfo *client.ClusterInfo, filesystemName string) error { - command, args := cephclient.FinalizeCephCommandArgs("ceph", clusterInfo, []string{"fs", "status", filesystemName}, k8sh.MakeContext().ConfigDir) - var stat string - var err error - - logger.Infof("waiting for filesystem %q to be active", filesystemName) - for i := 0; i < utils.RetryLoop; i++ { - // run the ceph fs status command - stat, err := k8sh.MakeContext().Executor.ExecuteCommandWithCombinedOutput(command, args...) - if err != nil { - logger.Warningf("failed to get filesystem %q status. %+v", filesystemName, err) - } - - // as long as at least one mds is active, it's okay - if strings.Contains(stat, "active") { - logger.Infof("done waiting for filesystem %q to be active", filesystemName) - return nil - } - logger.Infof("waiting for filesystem %q to be active. status=%s", filesystemName, stat) - time.Sleep(utils.RetryInterval * time.Second) - } - return fmt.Errorf("gave up waiting to get filesystem %q status [err: %+v] Status returned:\n%s", filesystemName, err, stat) -} diff --git a/tests/integration/ceph_base_object_test.go b/tests/integration/ceph_base_object_test.go deleted file mode 100644 index cb54a0165..000000000 --- a/tests/integration/ceph_base_object_test.go +++ /dev/null @@ -1,470 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/daemon/ceph/client" - rgw "github.com/rook/rook/pkg/operator/ceph/object" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - userid = "rook-user" - userdisplayname = "A rook RGW user" - bucketname = "smokebkt" - ObjBody = "Test Rook Object Data" - ObjectKey1 = "rookObj1" - ObjectKey2 = "rookObj2" - ObjectKey3 = "rookObj3" - ObjectKey4 = "rookObj4" - contentType = "plain/text" - obcName = "smoke-delete-bucket" - region = "us-east-1" - maxObject = "2" - newMaxObject = "3" - bucketStorageClassName = "rook-smoke-delete-bucket" -) - -// Smoke Test for ObjectStore - Test check the following operations on ObjectStore in order -// Create object store, Create User, Connect to Object Store, Create Bucket, Read/Write/Delete to bucket, -// Check issues in MGRs, Delete Bucket and Delete user -// Test for ObjectStore with and without TLS enabled -func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string) { - storeName := "tlsteststore" - logger.Info("Object Storage End To End Integration Test with TLS enabled - Create Object Store, User,Bucket and read/write to bucket") - logger.Infof("Running on Rook Cluster %s", namespace) - createCephObjectStore(s, helper, k8sh, namespace, storeName, 3, true) - testObjectStoreOperations(s, helper, k8sh, namespace, storeName) - - storeName = "teststore" - logger.Info("Object Storage End To End Integration Test without TLS - Create Object Store, User,Bucket and read/write to bucket") - logger.Infof("Running on Rook Cluster %s", namespace) - createCephObjectStore(s, helper, k8sh, namespace, storeName, 3, false) - testObjectStoreOperations(s, helper, k8sh, namespace, storeName) -} - -// Test Object StoreCreation on Rook that was installed via helm -func runObjectE2ETestLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, name string, replicaSize int, deleteStore bool) { - logger.Infof("Object Storage End To End Integration Test - Create Object Store and check if rgw service is Running") - logger.Infof("Running on Rook Cluster %s", settings.Namespace) - - logger.Infof("Step 1 : Create Object Store") - err := helper.ObjectClient.Create(settings.Namespace, name, int32(replicaSize), false) - assert.Nil(s.T(), err) - - logger.Infof("Step 2 : check rook-ceph-rgw service status and count") - assert.True(s.T(), k8sh.IsPodInExpectedState("rook-ceph-rgw", settings.Namespace, "Running"), - "Make sure rook-ceph-rgw is in running state") - - assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-rgw", settings.Namespace, 
replicaSize, "Running"), - "Make sure all rook-ceph-rgw pods are in Running state") - - assert.True(s.T(), k8sh.IsServiceUp("rook-ceph-rgw-"+name, settings.Namespace)) - - if deleteStore { - logger.Infof("Delete Object Store") - err = helper.ObjectClient.Delete(settings.Namespace, name) - assert.Nil(s.T(), err) - logger.Infof("Done deleting object store") - } -} - -func objectStoreCleanUp(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) { - logger.Infof("Delete Object Store (will fail if users and buckets still exist)") - err := helper.ObjectClient.Delete(namespace, storeName) - assert.Nil(s.T(), err) - logger.Infof("Done deleting object store") -} - -func createCephObjectUser( - s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, - namespace, storeName, userID string, - checkPhase bool, -) { - s.T().Helper() - - cosuErr := helper.ObjectUserClient.Create(namespace, userID, userdisplayname, storeName) - assert.Nil(s.T(), cosuErr) - logger.Infof("Waiting 5 seconds for the object user to be created") - time.Sleep(5 * time.Second) - logger.Infof("Checking to see if the user secret has been created") - for i := 0; i < 6 && helper.ObjectUserClient.UserSecretExists(namespace, storeName, userID) == false; i++ { - logger.Infof("(%d) secret check sleeping for 5 seconds ...", i) - time.Sleep(5 * time.Second) - } - - checkCephObjectUser(s, helper, k8sh, namespace, storeName, userID, checkPhase) -} - -func checkCephObjectUser( - s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, - namespace, storeName, userID string, - checkPhase bool, -) { - s.T().Helper() - - logger.Infof("checking object store \"%s/%s\" user %q", namespace, storeName, userID) - assert.True(s.T(), helper.ObjectUserClient.UserSecretExists(namespace, storeName, userID)) - - userInfo, err := helper.ObjectUserClient.GetUser(namespace, storeName, userID) - assert.NoError(s.T(), err) - assert.Equal(s.T(), userID, userInfo.UserID) - assert.Equal(s.T(), userdisplayname, *userInfo.DisplayName) - - if checkPhase { - // status.phase doesn't exist before Rook v1.6 - phase, err := k8sh.GetResource("--namespace", namespace, "cephobjectstoreuser", userID, "--output", "jsonpath={.status.phase}") - assert.NoError(s.T(), err) - assert.Equal(s.T(), k8sutil.ReadyStatus, phase) - } -} - -func createCephObjectStore(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string, replicaSize int, tlsEnable bool) { - logger.Infof("Create Object Store %q with replica count %d", storeName, replicaSize) - rgwServiceName := "rook-ceph-rgw-" + storeName - if tlsEnable { - generateRgwTlsCertSecret(s, helper, k8sh, namespace, storeName, rgwServiceName) - } - t := s.T() - t.Run("create CephObjectStore", func(t *testing.T) { - err := helper.ObjectClient.Create(namespace, storeName, 3, tlsEnable) - assert.Nil(s.T(), err) - - // check that ObjectStore is created - logger.Infof("Check that RGW pods are Running") - for i := 0; i < 24 && k8sh.CheckPodCountAndState("rook-ceph-rgw", namespace, 1, "Running") == false; i++ { - logger.Infof("(%d) RGW pod check sleeping for 5 seconds ...", i) - time.Sleep(5 * time.Second) - } - assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-rgw", namespace, 1, "Running")) - logger.Info("RGW pods are running") - logger.Infof("Object store %q created successfully", storeName) - }) -} - -func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) { - ctx := 
context.TODO() - clusterInfo := client.AdminClusterInfo(namespace) - t := s.T() - t.Run("create CephObjectStoreUser", func(t *testing.T) { - createCephObjectUser(s, helper, k8sh, namespace, storeName, userid, true) - i := 0 - for i = 0; i < 4; i++ { - if helper.ObjectUserClient.UserSecretExists(namespace, storeName, userid) { - break - } - logger.Info("waiting 5 more seconds for user secret to exist") - time.Sleep(5 * time.Second) - } - assert.NotEqual(t, 4, i) - }) - - // Check object store status - t.Run("verify CephObjectStore status", func(t *testing.T) { - i := 0 - for i = 0; i < 10; i++ { - objectStore, err := k8sh.RookClientset.CephV1().CephObjectStores(namespace).Get(ctx, storeName, metav1.GetOptions{}) - assert.Nil(s.T(), err) - if objectStore.Status == nil || objectStore.Status.BucketStatus == nil { - logger.Infof("(%d) bucket status check sleeping for 5 seconds ...", i) - time.Sleep(5 * time.Second) - continue - } - logger.Info("objectstore status is", objectStore.Status) - if objectStore.Status.BucketStatus.Health == cephv1.ConditionFailure { - continue - } - assert.Equal(s.T(), cephv1.ConditionConnected, objectStore.Status.BucketStatus.Health) - // Info field has the endpoint in it - assert.NotEmpty(s.T(), objectStore.Status.Info) - assert.NotEmpty(s.T(), objectStore.Status.Info["endpoint"]) - break - } - assert.NotEqual(t, 10, i) - }) - - context := k8sh.MakeContext() - objectStore, err := k8sh.RookClientset.CephV1().CephObjectStores(namespace).Get(ctx, storeName, metav1.GetOptions{}) - assert.Nil(s.T(), err) - rgwcontext, err := rgw.NewMultisiteContext(context, clusterInfo, objectStore) - assert.Nil(s.T(), err) - t.Run("create ObjectBucketClaim with reclaim policy delete", func(t *testing.T) { - cobErr := helper.BucketClient.CreateBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region) - assert.Nil(s.T(), cobErr) - cobcErr := helper.BucketClient.CreateObc(obcName, bucketStorageClassName, bucketname, maxObject, true) - assert.Nil(s.T(), cobcErr) - - created := utils.Retry(12, 2*time.Second, "OBC is created", func() bool { - return helper.BucketClient.CheckOBC(obcName, "bound") - }) - assert.True(s.T(), created) - logger.Info("OBC created successfully") - - var bkt rgw.ObjectBucket - i := 0 - for i = 0; i < 4; i++ { - b, code, err := rgw.GetBucket(rgwcontext, bucketname) - if b != nil && err == nil { - bkt = *b - break - } - logger.Warningf("cannot get bucket %q, retrying... bucket: %v. 
code: %d, err: %v", bucketname, b, code, err) - logger.Infof("(%d) check bucket exists, sleeping for 5 seconds ...", i) - time.Sleep(5 * time.Second) - } - assert.NotEqual(s.T(), 4, i) - assert.Equal(s.T(), bucketname, bkt.Name) - logger.Info("OBC, Secret and ConfigMap created") - }) - - t.Run("use S3 client to put and get objects on OBC bucket", func(t *testing.T) { - var s3client *rgw.S3Agent - s3endpoint, _ := helper.ObjectClient.GetEndPointUrl(namespace, storeName) - s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) - s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) - if objectStore.Spec.IsTLSEnabled() { - s3client, err = rgw.NewTestOnlyS3Agent(s3AccessKey, s3SecretKey, s3endpoint, true) - } else { - s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, true, nil) - } - - assert.Nil(s.T(), err) - logger.Infof("endpoint (%s) Accesskey (%s) secret (%s)", s3endpoint, s3AccessKey, s3SecretKey) - - t.Run("put object on OBC bucket", func(t *testing.T) { - _, poErr := s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey1, contentType) - assert.Nil(s.T(), poErr) - }) - - t.Run("get object on OBC bucket", func(t *testing.T) { - read, err := s3client.GetObjectInBucket(bucketname, ObjectKey1) - assert.Nil(s.T(), err) - assert.Equal(s.T(), ObjBody, read) - }) - - t.Run("test quota enforcement on OBC bucket", func(t *testing.T) { - _, poErr := s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey2, contentType) - assert.Nil(s.T(), poErr) - logger.Infof("Testing the max object limit") - _, poErr = s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey3, contentType) - assert.Error(s.T(), poErr) - }) - - t.Run("test update quota on OBC bucket", func(t *testing.T) { - poErr := helper.BucketClient.UpdateObc(obcName, bucketStorageClassName, bucketname, newMaxObject, true) - assert.Nil(s.T(), poErr) - updated := utils.Retry(5, 2*time.Second, "OBC is updated", func() bool { - return helper.BucketClient.CheckOBMaxObject(obcName, newMaxObject) - }) - assert.True(s.T(), updated) - logger.Infof("Testing the updated object limit") - _, poErr = s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey3, contentType) - assert.NoError(s.T(), poErr) - _, poErr = s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey4, contentType) - assert.Error(s.T(), poErr) - }) - - t.Run("delete objects on OBC bucket", func(t *testing.T) { - _, delobjErr := s3client.DeleteObjectInBucket(bucketname, ObjectKey1) - assert.Nil(s.T(), delobjErr) - _, delobjErr = s3client.DeleteObjectInBucket(bucketname, ObjectKey2) - assert.Nil(s.T(), delobjErr) - _, delobjErr = s3client.DeleteObjectInBucket(bucketname, ObjectKey3) - assert.Nil(s.T(), delobjErr) - logger.Info("Objects deleted on bucket successfully") - }) - }) - - t.Run("Regression check: Verify bucket does not revert to Pending phase", func(t *testing.T) { - // A bug exists in older versions of lib-bucket-provisioner that will revert a bucket and claim - // back to "Pending" phase after being created and initially "Bound" by looping infinitely in - // the bucket provision/creation loop. Verify that the OBC is "Bound" and stays that way. - // The OBC reconcile loop runs again immediately b/c the OBC is modified to refer to its OB. - // Wait a short amount of time before checking just to be safe. 
- time.Sleep(15 * time.Second) - assert.True(s.T(), helper.BucketClient.CheckOBC(obcName, "bound")) - }) - - t.Run("delete CephObjectStore should be blocked by OBC bucket and CephObjectStoreUser", func(t *testing.T) { - err := k8sh.DeleteResourceAndWait(false, "-n", namespace, "CephObjectStore", storeName) - assert.NoError(t, err) - // wait initially for the controller to detect deletion. Almost always enough, but not - // waiting will almost always fail the first check in the loop - time.Sleep(2 * time.Second) - - store := &cephv1.CephObjectStore{} - i := 0 - for i = 0; i < 4; i++ { - storeStr, err := k8sh.GetResource("-n", namespace, "CephObjectStore", storeName, "-o", "json") - assert.NoError(t, err) - - err = json.Unmarshal([]byte(storeStr), &store) - assert.NoError(t, err) - - cond := cephv1.FindStatusCondition(store.Status.Conditions, cephv1.ConditionDeletionIsBlocked) - if cond != nil { - break - } - logger.Info("waiting 2 more seconds for CephObjectStore to reach Deleting state") - time.Sleep(2 * time.Second) - } - assert.NotEqual(t, 4, i) - - assert.Equal(t, cephv1.ConditionDeleting, store.Status.Phase) // phase == "Deleting" - // verify deletion is blocked b/c object has dependents - cond := cephv1.FindStatusCondition(store.Status.Conditions, cephv1.ConditionDeletionIsBlocked) - logger.Infof("condition: %+v", cond) - assert.Equal(t, v1.ConditionTrue, cond.Status) - assert.Equal(t, cephv1.ObjectHasDependentsReason, cond.Reason) - // the CephObjectStoreUser and the bucket should both block deletion - assert.Contains(t, cond.Message, "CephObjectStoreUsers") - assert.Contains(t, cond.Message, userid) - assert.Contains(t, cond.Message, "buckets") - assert.Contains(t, cond.Message, bucketname) - - // The event is created by the same method that adds that condition, so we can be pretty - // sure it exists here. No need to do extra work to validate the event. 
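The deletion-blocked check above follows a pattern these tests use repeatedly: fetch the custom resource as JSON, unmarshal it, and poll until the expected status condition shows up. A minimal, standard-library-only sketch of that loop follows; getResourceJSON stands in for k8sh.GetResource, and the struct shapes are trimmed to just the fields the check reads, so treat them as assumptions rather than the real CephObjectStore types.

package conditionwait

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed-down mirror of the fields the deletion-blocked check reads; the real
// CephObjectStore status carries more than this.
type storeCondition struct {
	Type   string `json:"type"`
	Status string `json:"status"`
	Reason string `json:"reason"`
}

type storeStatus struct {
	Phase      string           `json:"phase"`
	Conditions []storeCondition `json:"conditions"`
}

type objectStore struct {
	Status storeStatus `json:"status"`
}

// waitForCondition polls getResourceJSON (a stand-in for fetching the CR with
// `kubectl get ... -o json`) until a condition of the given type appears or the
// attempts run out.
func waitForCondition(getResourceJSON func() (string, error), condType string, attempts int, interval time.Duration) (*storeCondition, error) {
	for i := 0; i < attempts; i++ {
		raw, err := getResourceJSON()
		if err != nil {
			return nil, err
		}
		var store objectStore
		if err := json.Unmarshal([]byte(raw), &store); err != nil {
			return nil, err
		}
		for j := range store.Status.Conditions {
			if store.Status.Conditions[j].Type == condType {
				return &store.Status.Conditions[j], nil
			}
		}
		time.Sleep(interval)
	}
	return nil, fmt.Errorf("condition %q not found after %d attempts", condType, attempts)
}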
- }) - - t.Run("delete OBC", func(t *testing.T) { - i := 0 - dobcErr := helper.BucketClient.DeleteObc(obcName, bucketStorageClassName, bucketname, maxObject, true) - assert.Nil(s.T(), dobcErr) - logger.Info("Checking to see if the obc, secret and cm have all been deleted") - for i = 0; i < 4 && !helper.BucketClient.CheckOBC(obcName, "deleted"); i++ { - logger.Infof("(%d) obc deleted check, sleeping for 5 seconds ...", i) - time.Sleep(5 * time.Second) - } - assert.NotEqual(s.T(), 4, i) - - logger.Info("ensure OBC bucket was deleted") - var rgwErr int - for i = 0; i < 4; i++ { - _, rgwErr, _ = rgw.GetBucket(rgwcontext, bucketname) - if rgwErr == rgw.RGWErrorNotFound { - break - } - logger.Infof("(%d) check bucket deleted, sleeping for 5 seconds ...", i) - time.Sleep(5 * time.Second) - } - assert.NotEqual(s.T(), 4, i) - assert.Equal(s.T(), rgwErr, rgw.RGWErrorNotFound) - - dobErr := helper.BucketClient.DeleteBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region) - assert.Nil(s.T(), dobErr) - }) - - t.Run("delete CephObjectStoreUser", func(t *testing.T) { - dosuErr := helper.ObjectUserClient.Delete(namespace, userid) - assert.Nil(s.T(), dosuErr) - logger.Info("Object store user deleted successfully") - logger.Info("Checking to see if the user secret has been deleted") - i := 0 - for i = 0; i < 4 && helper.ObjectUserClient.UserSecretExists(namespace, storeName, userid) == true; i++ { - logger.Infof("(%d) secret check sleeping for 5 seconds ...", i) - time.Sleep(5 * time.Second) - } - assert.False(s.T(), helper.ObjectUserClient.UserSecretExists(namespace, storeName, userid)) - }) - - t.Run("check that mgrs are not in a crashloop", func(t *testing.T) { - assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-mgr", namespace, 1, "Running")) - }) - - t.Run("CephObjectStore should delete now that dependents are gone", func(t *testing.T) { - // wait initially since it will almost never detect on the first try without this. 
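The delete checks above all poll with a bounded attempt count and a short sleep, which is the same shape utils.Retry wraps elsewhere in this file. As a rough sketch (an assumption about the helper's shape, not the actual rook test utility), such a retry function can be as small as this:

package retrysketch

import (
	"log"
	"time"
)

// Retry runs check up to attempts times, sleeping between tries, and reports
// whether the check ever succeeded; description is only used for logging.
func Retry(attempts int, interval time.Duration, description string, check func() bool) bool {
	for i := 0; i < attempts; i++ {
		if check() {
			return true
		}
		log.Printf("(%d) waiting for %s, sleeping %s ...", i, description, interval)
		time.Sleep(interval)
	}
	return false
}

Folding the OBC-deleted, bucket-deleted, and user-secret-deleted loops into one helper like this reduces each subtest to a single assertion on the returned bool.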
- time.Sleep(3 * time.Second) - - store := &cephv1.CephObjectStore{} - i := 0 - for i = 0; i < 4; i++ { - storeStr, err := k8sh.GetResource("-n", namespace, "CephObjectStore", storeName, "-o", "json") - assert.NoError(t, err) - logger.Infof("store: \n%s", storeStr) - - err = json.Unmarshal([]byte(storeStr), &store) - assert.NoError(t, err) - - cond := cephv1.FindStatusCondition(store.Status.Conditions, cephv1.ConditionDeletionIsBlocked) - if cond.Status == v1.ConditionFalse { - break - } - logger.Info("waiting 3 more seconds for CephObjectStore to be unblocked by dependents") - time.Sleep(3 * time.Second) - } - assert.NotEqual(t, 4, i) - - assert.Equal(t, cephv1.ConditionDeleting, store.Status.Phase) // phase == "Deleting" - // verify deletion is NOT blocked b/c object has dependents - cond := cephv1.FindStatusCondition(store.Status.Conditions, cephv1.ConditionDeletionIsBlocked) - assert.Equal(t, v1.ConditionFalse, cond.Status) - assert.Equal(t, cephv1.ObjectHasNoDependentsReason, cond.Reason) - - err := k8sh.WaitUntilResourceIsDeleted("CephObjectStore", namespace, storeName) - assert.NoError(t, err) - }) - - // TODO : Add case for brownfield/cleanup s3 client} -} -func generateRgwTlsCertSecret(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName, rgwServiceName string) { - ctx := context.TODO() - root, err := utils.FindRookRoot() - require.NoError(s.T(), err, "failed to get rook root") - tlscertdir, err := ioutil.TempDir(root, "tlscertdir") - require.NoError(s.T(), err, "failed to create directory for TLS certs") - defer os.RemoveAll(tlscertdir) - cmdArgs := utils.CommandArgs{Command: filepath.Join(root, "tests/scripts/github-action-helper.sh"), - CmdArgs: []string{"generate_tls_config", tlscertdir, rgwServiceName, namespace}} - cmdOut := utils.ExecuteCommand(cmdArgs) - require.NoError(s.T(), cmdOut.Err) - tlsKeyIn, err := ioutil.ReadFile(filepath.Join(tlscertdir, rgwServiceName+".key")) - require.NoError(s.T(), err) - tlsCertIn, err := ioutil.ReadFile(filepath.Join(tlscertdir, rgwServiceName+".crt")) - require.NoError(s.T(), err) - tlsCaCertIn, err := ioutil.ReadFile(filepath.Join(tlscertdir, rgwServiceName+".ca")) - require.NoError(s.T(), err) - secretCertOut := fmt.Sprintf("%s%s%s", tlsKeyIn, tlsCertIn, tlsCaCertIn) - tlsK8sSecret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: storeName, - Namespace: namespace, - }, - Data: map[string][]byte{ - "cert": []byte(secretCertOut), - }, - } - _, err = k8sh.Clientset.CoreV1().Secrets(namespace).Create(ctx, tlsK8sSecret, metav1.CreateOptions{}) - require.Nil(s.T(), err) -} diff --git a/tests/integration/ceph_flex_test.go b/tests/integration/ceph_flex_test.go deleted file mode 100644 index 93caa1e3f..000000000 --- a/tests/integration/ceph_flex_test.go +++ /dev/null @@ -1,423 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package integration - -import ( - "context" - "strconv" - "testing" - - "fmt" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/version" -) - -// ****************************************************** -// *** Scenarios tested by the TestCephFlexSuite *** -// Flex driver block scenarios - repeat for each PVC -// 1. ReadWriteOnce -// a. Mount Volume on a new pod - make sure persisted data is present and write new data -// b. Mount volume on two pods with - mount should be successful only on first pod -// 2. ReadOnlyMany -// a. Mount Multiple pods with same volume - All pods should be able to read data -// b. Mount Multiple pods with same volume - All pods should not be able to write data -// 3. Run StatefulSet with PVC -// a. Scale up pods -// b. Scale down pods -// c. Failover pods -// d. Delete StatefulSet -// -// Flex driver file system scenarios -// 1. Create a filesystem -// a. Mount a directory with the static mount -// ****************************************************** - -func TestCephFlexSuite(t *testing.T) { - if installer.SkipTestSuite(installer.CephTestSuite) { - t.Skip() - } - - s := new(CephFlexDriverSuite) - defer func(s *CephFlexDriverSuite) { - HandlePanics(recover(), s.TearDownSuite, s.T) - }(s) - suite.Run(t, s) -} - -type CephFlexDriverSuite struct { - suite.Suite - testClient *clients.TestClient - clusterInfo *client.ClusterInfo - bc *clients.BlockOperation - kh *utils.K8sHelper - installer *installer.CephInstaller - settings *installer.TestCephSettings - pvcNameRWO string - pvcNameRWX string -} - -func (s *CephFlexDriverSuite) SetupSuite() { - - namespace := "flex-ns" - s.pvcNameRWO = "block-persistent-rwo" - s.pvcNameRWX = "block-persistent-rwx" - s.settings = &installer.TestCephSettings{ - ClusterName: "flex-test", - Namespace: namespace, - OperatorNamespace: installer.SystemNamespace(namespace), - StorageClassName: "", - UseHelm: false, - UsePVC: false, - Mons: 1, - SkipOSDCreation: false, - UseCSI: false, - DirectMountToolbox: true, - RookVersion: installer.VersionMaster, - CephVersion: installer.OctopusVersion, - } - s.settings.ApplyEnvVars() - s.clusterInfo = client.AdminClusterInfo(namespace) - s.installer, s.kh = StartTestCluster(s.T, s.settings, flexDriverMinimalTestVersion) - s.testClient = clients.CreateTestClient(s.kh, s.installer.Manifests) - s.bc = s.testClient.BlockClient -} - -func (s *CephFlexDriverSuite) AfterTest(suiteName, testName string) { - s.installer.CollectOperatorLog(suiteName, testName) -} - -func (s *CephFlexDriverSuite) TestFileSystem() { - preserveFilesystemOnDelete := false - runFileE2ETest(s.testClient, s.kh, s.Suite, s.settings, "flex-fs", preserveFilesystemOnDelete) -} - -func (s *CephFlexDriverSuite) TestBlockStorageMountUnMountForStatefulSets() { - ctx := context.TODO() - poolName := "stspool" - storageClassName := "stssc" - reclaimPolicy := "Delete" - statefulSetName := "block-stateful-set" - statefulPodsName := "ststest" - - defer s.statefulSetDataCleanup(poolName, storageClassName, reclaimPolicy, statefulSetName, statefulPodsName) - logger.Infof("Test case when block persistent volumes are scaled up and down along with StatefulSet") - logger.Info("Step 1: Create pool and 
storageClass") - - err := s.testClient.PoolClient.Create(poolName, s.settings.Namespace, 1) - assert.Nil(s.T(), err) - err = s.testClient.BlockClient.CreateStorageClass(false, poolName, storageClassName, reclaimPolicy, s.settings.Namespace) - assert.Nil(s.T(), err) - logger.Info("Step 2 : Deploy statefulSet with 1X replication") - service, statefulset := getBlockStatefulSetAndServiceDefinition(defaultNamespace, statefulSetName, statefulPodsName, storageClassName) - _, err = s.kh.Clientset.CoreV1().Services(defaultNamespace).Create(ctx, service, metav1.CreateOptions{}) - assert.Nil(s.T(), err) - _, err = s.kh.Clientset.AppsV1().StatefulSets(defaultNamespace).Create(ctx, statefulset, metav1.CreateOptions{}) - assert.Nil(s.T(), err) - require.True(s.T(), s.kh.CheckPodCountAndState(statefulSetName, defaultNamespace, 1, "Running")) - require.True(s.T(), s.kh.CheckPvcCountAndStatus(statefulSetName, defaultNamespace, 1, "Bound")) - - logger.Info("Step 3 : Scale up replication on statefulSet") - scaleerr := s.kh.ScaleStatefulSet(statefulPodsName, defaultNamespace, 2) - assert.NoError(s.T(), scaleerr, "make sure scale up is successful") - require.True(s.T(), s.kh.CheckPodCountAndState(statefulSetName, defaultNamespace, 2, "Running")) - require.True(s.T(), s.kh.CheckPvcCountAndStatus(statefulSetName, defaultNamespace, 2, "Bound")) - - logger.Info("Step 4 : Scale down replication on statefulSet") - scaleerr = s.kh.ScaleStatefulSet(statefulPodsName, defaultNamespace, 1) - assert.NoError(s.T(), scaleerr, "make sure scale down is successful") - require.True(s.T(), s.kh.CheckPodCountAndState(statefulSetName, defaultNamespace, 1, "Running")) - require.True(s.T(), s.kh.CheckPvcCountAndStatus(statefulSetName, defaultNamespace, 2, "Bound")) - - logger.Info("Step 5 : Delete statefulSet") - delOpts := metav1.DeleteOptions{} - listOpts := metav1.ListOptions{LabelSelector: "app=" + statefulSetName} - err = s.kh.Clientset.CoreV1().Services(defaultNamespace).Delete(ctx, statefulSetName, delOpts) - assert.Nil(s.T(), err) - err = s.kh.Clientset.AppsV1().StatefulSets(defaultNamespace).Delete(ctx, statefulPodsName, delOpts) - assert.Nil(s.T(), err) - err = s.kh.Clientset.CoreV1().Pods(defaultNamespace).DeleteCollection(ctx, delOpts, listOpts) - assert.Nil(s.T(), err) - require.True(s.T(), s.kh.WaitUntilPodWithLabelDeleted(fmt.Sprintf("app=%s", statefulSetName), defaultNamespace)) - require.True(s.T(), s.kh.CheckPvcCountAndStatus(statefulSetName, defaultNamespace, 2, "Bound")) -} - -func (s *CephFlexDriverSuite) statefulSetDataCleanup(poolName, storageClassName, reclaimPolicy, statefulSetName, statefulPodsName string) { - ctx := context.TODO() - delOpts := metav1.DeleteOptions{} - listOpts := metav1.ListOptions{LabelSelector: "app=" + statefulSetName} - // Delete stateful set - err := s.kh.Clientset.CoreV1().Services(defaultNamespace).Delete(ctx, statefulSetName, delOpts) - assertNoErrorUnlessNotFound(s.Suite, err) - err = s.kh.Clientset.AppsV1().StatefulSets(defaultNamespace).Delete(ctx, statefulPodsName, delOpts) - assertNoErrorUnlessNotFound(s.Suite, err) - err = s.kh.Clientset.CoreV1().Pods(defaultNamespace).DeleteCollection(ctx, delOpts, listOpts) - assert.NoError(s.T(), err) - - // Delete all PVCs - s.kh.DeletePvcWithLabel(defaultNamespace, statefulSetName) - // Delete storageclass and pool - err = s.testClient.PoolClient.DeletePool(s.testClient.BlockClient, s.clusterInfo, poolName) - require.Nil(s.T(), err) - err = s.testClient.BlockClient.DeleteStorageClass(storageClassName) - require.Nil(s.T(), err) -} - -func 
(s *CephFlexDriverSuite) setupPVCs() { - logger.Infof("creating the test PVCs") - poolNameRWO := "block-pool-rwo" - storageClassNameRWO := "rook-ceph-block-rwo" - - // Create PVCs - err := s.testClient.BlockClient.CreateStorageClassAndPVC(defaultNamespace, poolNameRWO, storageClassNameRWO, "Delete", s.pvcNameRWO, "ReadWriteOnce") - require.Nil(s.T(), err) - require.True(s.T(), s.kh.WaitUntilPVCIsBound(defaultNamespace, s.pvcNameRWO), "Make sure PVC is Bound") - - err = s.testClient.BlockClient.CreatePVC(defaultNamespace, s.pvcNameRWX, storageClassNameRWO, "ReadWriteMany", "1M") - require.Nil(s.T(), err) - require.True(s.T(), s.kh.WaitUntilPVCIsBound(defaultNamespace, s.pvcNameRWX), "Make sure PVC is Bound") - - // Mount PVC on a pod and write some data. - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("setup-block-rwo", s.pvcNameRWO, false)) - require.Nil(s.T(), err) - crdName, err := s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWO) - require.Nil(s.T(), err) - s.kh.IsVolumeResourcePresent(s.settings.OperatorNamespace, crdName) - - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("setup-block-rwx", s.pvcNameRWX, false)) - require.Nil(s.T(), err) - crdName, err = s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWX) - require.Nil(s.T(), err) - s.kh.IsVolumeResourcePresent(s.settings.OperatorNamespace, crdName) - require.True(s.T(), s.kh.IsPodRunning("setup-block-rwo", defaultNamespace), "make sure setup-block-rwo pod is in running state") - require.True(s.T(), s.kh.IsPodRunning("setup-block-rwx", defaultNamespace), "make sure setup-block-rwx pod is in running state") - - // Write Data to Pod - message := "Persisted message one" - filename := "bsFile1" - err = s.kh.WriteToPod("", "setup-block-rwo", filename, message) - require.Nil(s.T(), err) - err = s.kh.WriteToPod("", "setup-block-rwx", filename, message) - require.Nil(s.T(), err) - - // Unmount pod - _, err = s.kh.DeletePods("setup-block-rwo", "setup-block-rwx") - require.Nil(s.T(), err) - require.True(s.T(), s.kh.IsPodTerminated("setup-block-rwo", defaultNamespace), "make sure setup-block-rwo pod is terminated") - require.True(s.T(), s.kh.IsPodTerminated("setup-block-rwx", defaultNamespace), "make sure setup-block-rwx pod is terminated") -} - -func (s *CephFlexDriverSuite) TearDownSuite() { - logger.Infof("Cleaning up block storage") - - _, err := s.kh.DeletePods( - "setup-block-rwo", "setup-block-rwx", "rwo-block-rw-one", "rwo-block-rw-two", "rwo-block-ro-one", - "rwo-block-ro-two", "rwx-block-rw-one", "rwx-block-rw-two", "rwx-block-ro-one", "rwx-block-ro-two") - assert.NoError(s.T(), err) - err = s.testClient.BlockClient.DeletePVC(s.settings.Namespace, s.pvcNameRWO) - assertNoErrorUnlessNotFound(s.Suite, err) - err = s.testClient.BlockClient.DeletePVC(s.settings.Namespace, s.pvcNameRWX) - assertNoErrorUnlessNotFound(s.Suite, err) - err = s.testClient.BlockClient.DeleteStorageClass("rook-ceph-block-rwo") - assert.NoError(s.T(), err) - err = s.testClient.BlockClient.DeleteStorageClass("rook-ceph-block-rwx") - assert.NoError(s.T(), err) - err = s.testClient.PoolClient.DeletePool(s.testClient.BlockClient, s.clusterInfo, "block-pool-rwo") - assert.NoError(s.T(), err) - err = s.testClient.PoolClient.DeletePool(s.testClient.BlockClient, s.clusterInfo, "block-pool-rwx") - assert.NoError(s.T(), err) - s.installer.UninstallRook() -} - -func (s *CephFlexDriverSuite) TestBlockStorageMountUnMountForDifferentAccessModes() { - s.setupPVCs() - - logger.Infof("Test case when existing RWO PVC is mounted and unmounted on pods with 
various accessModes") - logger.Infof("Step 1.1: Mount existing ReadWriteOnce and ReadWriteMany PVC on a Pod with RW access") - // mount PVC with RWO access on a pod with readonly set to false - err := s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-rw-one", s.pvcNameRWO, false)) - require.Nil(s.T(), err) - // mount PVC with RWX access on a pod with readonly set to false - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-rw-one", s.pvcNameRWX, false)) - require.Nil(s.T(), err) - crdName, err := s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWO) - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsVolumeResourcePresent(s.settings.OperatorNamespace, crdName), fmt.Sprintf("make sure Volume %s is created", crdName)) - assert.True(s.T(), s.kh.IsPodRunning("rwo-block-rw-one", defaultNamespace), "make sure block-rw-one pod is in running state") - - crdName, err = s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWX) - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsVolumeResourcePresent(s.settings.OperatorNamespace, crdName), fmt.Sprintf("make sure Volume %s is created", crdName)) - assert.True(s.T(), s.kh.IsPodRunning("rwx-block-rw-one", defaultNamespace), "make sure rwx-block-rw-one pod is in running state") - - logger.Infof("Step 2: Check if previously persisted data is readable from ReadWriteOnce and ReadWriteMany PVC") - // Read data on RWO PVC Mounted on pod with RW Access - filename1 := "bsFile1" - message1 := "Persisted message one" - err = s.kh.ReadFromPod("", "rwo-block-rw-one", filename1, message1) - assert.Nil(s.T(), err) - - // Read data on RWX PVC Mounted on pod with RW Access - err = s.kh.ReadFromPod("", "rwx-block-rw-one", filename1, message1) - assert.Nil(s.T(), err) - - logger.Infof("Step 3: Check if read/write works on ReadWriteOnce and ReadWriteMany PVC") - // Write data on RWO PVC Mounted on pod with RW Access - filename2 := "bsFile2" - message2 := "Persisted message two" - assert.Nil(s.T(), s.kh.WriteToPod("", "rwo-block-rw-one", filename2, message2)) - - // Read data on RWO PVC Mounted on pod with RW Access - assert.Nil(s.T(), s.kh.ReadFromPod("", "rwo-block-rw-one", filename2, message2)) - - // Write data on RWX PVC Mounted on pod with RW Access - assert.Nil(s.T(), s.kh.WriteToPod("", "rwx-block-rw-one", filename2, message2)) - - // Read data on RWX PVC Mounted on pod with RW Access - assert.Nil(s.T(), s.kh.ReadFromPod("", "rwx-block-rw-one", filename2, message2)) - - // Mount another Pod with RW access on same PVC - logger.Infof("Step 4: Mount existing ReadWriteOnce and ReadWriteMany PVC on a new Pod with RW access") - // Mount RWO PVC on a new pod with ReadOnly set to false - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-rw-two", s.pvcNameRWO, false)) - assert.Nil(s.T(), err) - // Mount RWX PVC on a new pod with ReadOnly set to false - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-rw-two", s.pvcNameRWX, false)) - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsPodInError("rwo-block-rw-two", defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure rwo-block-rw-two pod errors out while mounting the volume") - assert.True(s.T(), s.kh.IsPodInError("rwx-block-rw-two", defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure rwx-block-rw-two pod errors out while mounting the volume") - _, err = s.kh.DeletePods("rwo-block-rw-two", "rwx-block-rw-two") - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsPodTerminated("rwo-block-rw-two", 
defaultNamespace), "make sure rwo-block-rw-two pod is terminated") - assert.True(s.T(), s.kh.IsPodTerminated("rwx-block-rw-two", defaultNamespace), "make sure rwx-block-rw-two pod is terminated") - - logger.Infof("Step 5: Mount existing ReadWriteOnce and ReadWriteMany PVC on a new Pod with RO access") - // Mount RWO PVC on a new pod with ReadOnly set to true - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-ro-one", s.pvcNameRWO, true)) - assert.Nil(s.T(), err) - // Mount RWX PVC on a new pod with ReadOnly set to true - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-ro-one", s.pvcNameRWX, true)) - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsPodInError("rwo-block-ro-one", defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure rwo-block-ro-one pod errors out while mounting the volume") - assert.True(s.T(), s.kh.IsPodInError("rwx-block-ro-one", defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure rwx-block-ro-one pod errors out while mounting the volume") - _, err = s.kh.DeletePods("rwo-block-ro-one", "rwx-block-ro-one") - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsPodTerminated("rwo-block-ro-one", defaultNamespace), "make sure rwo-block-ro-one pod is terminated") - assert.True(s.T(), s.kh.IsPodTerminated("rwx-block-ro-one", defaultNamespace), "make sure rwx-block-ro-one pod is terminated") - - logger.Infof("Step 6: UnMount Pod with RWX and RWO access") - _, err = s.kh.DeletePods("rwo-block-rw-one", "rwx-block-rw-one") - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsPodTerminated("rwo-block-rw-one", defaultNamespace), "make sure rwo-block-rw-one pod is terminated") - assert.True(s.T(), s.kh.IsPodTerminated("rwx-block-rw-one", defaultNamespace), "make sure rwx-lock-rw-one pod is terminated") - - logger.Infof("Step 7: Mount ReadWriteOnce and ReadWriteMany PVC on two different pods with ReadOnlyMany with Readonly Access") - - // Mount RWO PVC on 2 pods with ReadOnly set to True - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-ro-one", s.pvcNameRWO, true)) - assert.Nil(s.T(), err) - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-ro-two", s.pvcNameRWO, true)) - assert.Nil(s.T(), err) - - // Mount RWX PVC on 2 pods with ReadOnly set to True - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-ro-one", s.pvcNameRWX, true)) - assert.Nil(s.T(), err) - err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-ro-two", s.pvcNameRWX, true)) - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsPodRunning("rwo-block-ro-one", defaultNamespace), "make sure rwo-block-ro-one pod is in running state") - assert.True(s.T(), s.kh.IsPodRunning("rwo-block-ro-two", defaultNamespace), "make sure rwo-block-ro-two pod is in running state") - assert.True(s.T(), s.kh.IsPodRunning("rwx-block-ro-one", defaultNamespace), "make sure rwx-block-ro-one pod is in running state") - assert.True(s.T(), s.kh.IsPodRunning("rwx-block-ro-two", defaultNamespace), "make sure rwx-block-ro-two pod is in running state") - - logger.Infof("Step 8: Read Data from both ReadyOnlyMany and ReadWriteOnce pods with ReadOnly Access") - // Read data from RWO PVC via both ReadOnly pods - assert.Nil(s.T(), s.kh.ReadFromPod("", "rwo-block-ro-one", filename1, message1)) - assert.Nil(s.T(), s.kh.ReadFromPod("", "rwo-block-ro-two", filename1, message1)) - - // Read data from RWX PVC via both ReadOnly pods - assert.Nil(s.T(), s.kh.ReadFromPod("", "rwx-block-ro-one", filename1, message1)) - 
assert.Nil(s.T(), s.kh.ReadFromPod("", "rwx-block-ro-two", filename1, message1)) - - logger.Infof("Step 9: Write Data to Pod with ReadOnlyMany and ReadWriteOnce PVC mounted with ReadOnly access") - // Write data to RWO PVC via pod with ReadOnly Set to true - message3 := "Persisted message three" - filename3 := "bsFile3" - err = s.kh.WriteToPod("", "rwo-block-ro-one", filename3, message3) - assert.Contains(s.T(), err.Error(), "failed to write file") - - // Write data to RWx PVC via pod with ReadOnly Set to true - err = s.kh.WriteToPod("", "rwx-block-ro-one", filename3, message3) - assert.Contains(s.T(), err.Error(), "failed to write file") - - logger.Infof("Step 10: UnMount Pod with ReadOnlyMany and ReadWriteOnce PVCs") - // UnMount RWO PVC from both ReadOnly Pods - _, err = s.kh.DeletePods("rwo-block-ro-one", "rwo-block-ro-two", "rwx-block-ro-one", "rwx-block-ro-two") - assert.Nil(s.T(), err) - assert.True(s.T(), s.kh.IsPodTerminated("rwo-block-ro-one", defaultNamespace), "make sure rwo-block-ro-one pod is terminated") - assert.True(s.T(), s.kh.IsPodTerminated("rwo-block-ro-two", defaultNamespace), "make sure rwo-block-ro-two pod is terminated") - assert.True(s.T(), s.kh.IsPodTerminated("rwx-block-ro-one", defaultNamespace), "make sure rwx-lock-ro-one pod is terminated") - assert.True(s.T(), s.kh.IsPodTerminated("rwx-block-ro-two", defaultNamespace), "make sure rwx-block-ro-two pod is terminated") - - // Test volume expansion - v := version.MustParseSemantic(s.kh.GetK8sServerVersion()) - if v.AtLeast(version.MustParseSemantic("1.15.0")) { - logger.Infof("additional step: Expand block storage") - // Expanding the image by applying new PVC specs - err := s.testClient.BlockClient.CreatePVC(defaultNamespace, s.pvcNameRWO, "rook-ceph-block-rwo", "ReadWriteOnce", "2M") - require.Nil(s.T(), err) - // Once the pod using the volume is terminated, the filesystem is expanded and the size of the PVC is increased. - expandedPodName := "setup-block-rwo" - err = s.kh.DeletePod(defaultNamespace, expandedPodName) - require.Nil(s.T(), err) - err = s.bc.CreateClientPod(getFlexBlockPodDefinition(expandedPodName, s.pvcNameRWO, false)) - require.Nil(s.T(), err) - require.True(s.T(), s.kh.IsPodRunning(expandedPodName, defaultNamespace), "Make sure new pod is running") - require.True(s.T(), s.kh.WaitUntilPVCIsExpanded(defaultNamespace, s.pvcNameRWO, "2M"), "Make sure PVC is expanded") - logger.Infof("Block Storage successfully expanded") - } -} - -func getFlexBlockPodDefinition(podName, blockName string, readOnly bool) string { - return `apiVersion: v1 -kind: Pod -metadata: - name: ` + podName + ` -spec: - containers: - - image: busybox - name: block-test1 - command: - - sleep - - "3600" - imagePullPolicy: IfNotPresent - volumeMounts: - - name: block-persistent-storage - mountPath: ` + utils.TestMountPath + ` - volumes: - - name: block-persistent-storage - persistentVolumeClaim: - claimName: ` + blockName + ` - readOnly: ` + strconv.FormatBool(readOnly) + ` - restartPolicy: Never` -} diff --git a/tests/integration/ceph_helm_test.go b/tests/integration/ceph_helm_test.go deleted file mode 100644 index 6efe9f89a..000000000 --- a/tests/integration/ceph_helm_test.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
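getFlexBlockPodDefinition builds the client pod by concatenating YAML strings. For comparison only (this is not how the flex tests are written), the same pod can be expressed with typed corev1 objects, which moves the readOnly flag and claim name out of string formatting:

package podsketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// flexBlockPod builds the same single-container busybox pod as the YAML template
// above, but with typed API objects instead of string concatenation.
func flexBlockPod(podName, claimName, mountPath string, readOnly bool) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: podName},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{{
				Name:            "block-test1",
				Image:           "busybox",
				Command:         []string{"sleep", "3600"},
				ImagePullPolicy: corev1.PullIfNotPresent,
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "block-persistent-storage",
					MountPath: mountPath,
				}},
			}},
			Volumes: []corev1.Volume{{
				Name: "block-persistent-storage",
				VolumeSource: corev1.VolumeSource{
					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
						ClaimName: claimName,
						ReadOnly:  readOnly,
					},
				},
			}},
		},
	}
}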
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package integration - -import ( - "testing" - - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/suite" -) - -// *************************************************** -// *** Major scenarios tested by the TestHelmSuite *** -// Setup -// - A cluster created via the Helm chart -// Monitors -// - One mon -// OSDs -// - Bluestore running on a raw block device -// Block -// - Create a pool in the cluster -// - Mount/unmount a block device through the dynamic provisioner -// File system -// - Create a file system via the CRD -// Object -// - Create the object store via the CRD -// *************************************************** -func TestCephHelmSuite(t *testing.T) { - if installer.SkipTestSuite(installer.CephTestSuite) { - t.Skip() - } - - s := new(HelmSuite) - defer func(s *HelmSuite) { - HandlePanics(recover(), s.TearDownSuite, s.T) - }(s) - suite.Run(t, s) -} - -type HelmSuite struct { - suite.Suite - helper *clients.TestClient - installer *installer.CephInstaller - settings *installer.TestCephSettings - k8shelper *utils.K8sHelper -} - -func (h *HelmSuite) SetupSuite() { - namespace := "helm-ns" - h.settings = &installer.TestCephSettings{ - Namespace: namespace, - OperatorNamespace: namespace, - StorageClassName: "", - UseHelm: true, - UsePVC: false, - Mons: 1, - UseCSI: true, - SkipOSDCreation: false, - EnableAdmissionController: false, - EnableDiscovery: true, - RookVersion: installer.VersionMaster, - CephVersion: installer.OctopusVersion, - } - h.settings.ApplyEnvVars() - h.installer, h.k8shelper = StartTestCluster(h.T, h.settings, helmMinimalTestVersion) - h.helper = clients.CreateTestClient(h.k8shelper, h.installer.Manifests) -} - -func (h *HelmSuite) TearDownSuite() { - h.installer.UninstallRook() -} - -func (h *HelmSuite) AfterTest(suiteName, testName string) { - h.installer.CollectOperatorLog(suiteName, testName) -} - -// Test to make sure all rook components are installed and Running -func (h *HelmSuite) TestARookInstallViaHelm() { - checkIfRookClusterIsInstalled(h.Suite, h.k8shelper, h.settings.Namespace, h.settings.Namespace, 1) -} - -// Test BlockCreation on Rook that was installed via Helm -func (h *HelmSuite) TestBlockStoreOnRookInstalledViaHelm() { - runBlockCSITestLite(h.helper, h.k8shelper, h.Suite, h.settings) -} - -// Test File System Creation on Rook that was installed via helm -func (h *HelmSuite) TestFileStoreOnRookInstalledViaHelm() { - runFileE2ETestLite(h.helper, h.k8shelper, h.Suite, h.settings, "testfs") -} - -// Test Object StoreCreation on Rook that was installed via helm -func (h *HelmSuite) TestObjectStoreOnRookInstalledViaHelm() { - runObjectE2ETestLite(h.helper, h.k8shelper, h.Suite, h.settings, "default", 3, true) -} diff --git a/tests/integration/ceph_mgr_test.go b/tests/integration/ceph_mgr_test.go deleted file mode 100644 index 3f03ebb02..000000000 --- a/tests/integration/ceph_mgr_test.go +++ /dev/null @@ -1,254 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
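HelmSuite repeats the wiring every suite in this directory shares: a testify suite launched through suite.Run, with a deferred recover so teardown still runs if setup panics. A stripped-down skeleton of that wiring, with the Rook-specific installer and client plumbing left out, looks roughly like this:

package suitesketch

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// ExampleSuite keeps only the lifecycle hooks the integration suites rely on.
type ExampleSuite struct {
	suite.Suite
}

func (s *ExampleSuite) SetupSuite()    { /* start the cluster under test */ }
func (s *ExampleSuite) TearDownSuite() { /* uninstall and collect logs */ }

func (s *ExampleSuite) TestInstall() {
	s.Require().True(true) // placeholder for checkIfRookClusterIsInstalled-style checks
}

func TestExampleSuite(t *testing.T) {
	s := new(ExampleSuite)
	defer func() {
		// same intent as HandlePanics: make sure teardown runs on a setup panic
		if r := recover(); r != nil {
			s.TearDownSuite()
			t.Fatalf("suite panicked: %v", r)
		}
	}()
	suite.Run(t, s)
}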
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "encoding/json" - "fmt" - "sort" - "strings" - "testing" - "time" - - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" -) - -// ************************************************** -// *** Mgr operations covered by TestMgrSmokeSuite *** -// -// Ceph orchestrator device ls -// Ceph orchestrator status -// Ceph orchestrator host ls -// Ceph orchestrator create OSD -// Ceph orchestrator ls -// ************************************************** -func TestCephMgrSuite(t *testing.T) { - if installer.SkipTestSuite(installer.CephTestSuite) { - t.Skip() - } - // Skip this test suite in master and release builds. If there is an issue - // running against Ceph master we don't want to block the official builds. - if installer.TestIsOfficialBuild() { - t.Skip() - } - - logger.Info("TEMPORARILY disable the mgr test suite until https://github.com/rook/rook/issues/5877 is resolved") - t.Skip() - - s := new(CephMgrSuite) - defer func(s *CephMgrSuite) { - HandlePanics(recover(), s.TearDownSuite, s.T) - }(s) - suite.Run(t, s) -} - -type CephMgrSuite struct { - suite.Suite - settings *installer.TestCephSettings - k8sh *utils.K8sHelper - installer *installer.CephInstaller - namespace string -} - -type host struct { - Addr string - Hostname string - Labels []string - Status string -} - -type serviceStatus struct { - ContainerImageName string `json:"Container_image_name"` - LastRefresh string `json:"Last_refresh"` - Running int - Size int -} - -type service struct { - ServiceName string `json:"Service_name"` - ServiceType string `json:"Service_type"` - Status serviceStatus -} - -func (s *CephMgrSuite) SetupSuite() { - s.namespace = "mgr-ns" - - s.settings = &installer.TestCephSettings{ - ClusterName: s.namespace, - OperatorNamespace: installer.SystemNamespace(s.namespace), - Namespace: s.namespace, - StorageClassName: "", - UseHelm: false, - UsePVC: false, - Mons: 1, - UseCSI: true, - SkipOSDCreation: true, - RookVersion: installer.VersionMaster, - CephVersion: installer.MasterVersion, - } - s.settings.ApplyEnvVars() - s.installer, s.k8sh = StartTestCluster(s.T, s.settings, cephMasterSuiteMinimalTestVersion) - s.waitForOrchestrationModule() -} - -func (s *CephMgrSuite) AfterTest(suiteName, testName string) { - s.installer.CollectOperatorLog(suiteName, testName) -} - -func (s *CephMgrSuite) TearDownSuite() { - s.installer.UninstallRook() -} - -func (s *CephMgrSuite) execute(command []string) (error, string) { - orchCommand := append([]string{"orch"}, command...) 
- return s.installer.Execute("ceph", orchCommand, s.namespace) -} - -func (s *CephMgrSuite) waitForOrchestrationModule() { - var err error - for timeout := 0; timeout < 30; timeout++ { - err, output := s.execute([]string{"status"}) - logger.Infof("%s", output) - if err == nil { - logger.Info("Rook Toolbox ready to execute commands") - return - } - time.Sleep(2 * time.Second) - } - logger.Error("Giving up waiting for Rook Toolbox to be ready") - assert.Nil(s.T(), err) -} -func (s *CephMgrSuite) TestDeviceLs() { - logger.Info("Testing .... ") - err, device_list := s.execute([]string{"device", "ls"}) - assert.Nil(s.T(), err) - logger.Infof("output = %s", device_list) -} - -func (s *CephMgrSuite) TestStatus() { - logger.Info("Testing .... ") - err, status := s.execute([]string{"status"}) - assert.Nil(s.T(), err) - logger.Infof("output = %s", status) - - assert.Equal(s.T(), status, "Backend: rook\nAvailable: True") -} - -func (s *CephMgrSuite) TestHostLs() { - logger.Info("Testing .... ") - - // Get the orchestrator hosts - err, output := s.execute([]string{"host", "ls", "json"}) - assert.Nil(s.T(), err) - logger.Infof("output = %s", output) - - hosts := []byte(output) - var hostsList []host - - err = json.Unmarshal(hosts, &hostsList) - if err != nil { - assert.Nil(s.T(), err) - } - - var hostOutput []string - for _, hostItem := range hostsList { - hostOutput = append(hostOutput, hostItem.Addr) - } - sort.Strings(hostOutput) - - // get the k8s nodes - nodes, err := k8sutil.GetNodeHostNames(s.k8sh.Clientset) - assert.Nil(s.T(), err) - - k8sNodes := make([]string, 0, len(nodes)) - for k := range nodes { - k8sNodes = append(k8sNodes, k) - } - sort.Strings(k8sNodes) - - // nodes and hosts must be the same - assert.Equal(s.T(), hostOutput, k8sNodes) -} - -func (s *CephMgrSuite) TestCreateOSD() { - logger.Info("Testing .... ") - - // Get the first available device - err, deviceList := s.execute([]string{"device", "ls", "--format", "json"}) - assert.Nil(s.T(), err) - logger.Infof("output = %s", deviceList) - - inventory := make([]map[string]interface{}, 0) - - err = json.Unmarshal([]byte(deviceList), &inventory) - assert.Nil(s.T(), err) - - selectedNode := "" - selectedDevice := "" - for _, node := range inventory { - for _, device := range node["devices"].([]interface{}) { - if device.(map[string]interface{})["available"].(bool) { - selectedNode = node["name"].(string) - selectedDevice = strings.TrimPrefix(device.(map[string]interface{})["path"].(string), "/dev/") - break - } - } - if selectedDevice != "" { - break - } - } - assert.NotEqual(s.T(), "", selectedDevice, "No devices available to create test OSD") - assert.NotEqual(s.T(), "", selectedNode, "No nodes available to create test OSD") - - if selectedDevice == "" || selectedNode == "" { - return - } - // Create the OSD - err, output := s.execute([]string{"daemon", "add", "osd", fmt.Sprintf("%s:%s", selectedNode, selectedDevice)}) - - assert.Nil(s.T(), err) - logger.Infof("output = %s", output) - - err = s.k8sh.WaitForPodCount("app=rook-ceph-osd", s.namespace, 1) - assert.Nil(s.T(), err) -} - -func (s *CephMgrSuite) TestServiceLs() { - logger.Info("Testing .... 
") - err, output := s.execute([]string{"ls", "--format", "json"}) - assert.Nil(s.T(), err) - logger.Infof("output = %s", output) - - services := []byte(output) - var servicesList []service - - err = json.Unmarshal(services, &servicesList) - assert.Nil(s.T(), err) - - for _, svc := range servicesList { - labelFilter := fmt.Sprintf("app=rook-ceph-%s", svc.ServiceName) - k8sPods, err := k8sutil.PodsRunningWithLabel(s.k8sh.Clientset, s.namespace, labelFilter) - logger.Infof("Service: %+v", svc) - logger.Infof("k8s pods for svc %q: %d", svc.ServiceName, k8sPods) - assert.Nil(s.T(), err) - assert.Equal(s.T(), svc.Status.Running, k8sPods, fmt.Sprintf("Wrong number of pods for kind of service <%s>", svc.ServiceName)) - } -} diff --git a/tests/integration/ceph_multi_cluster_test.go b/tests/integration/ceph_multi_cluster_test.go deleted file mode 100644 index 4959678c8..000000000 --- a/tests/integration/ceph_multi_cluster_test.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "path/filepath" - "testing" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -const ( - localPathPVCmd = "tests/scripts/localPathPV.sh" -) - -// ************************************************************* -// *** Major scenarios tested by the MultiClusterDeploySuite *** -// Setup -// - Two clusters started in different namespaces via the CRD -// Monitors -// - One mon in each cluster -// OSDs -// - Bluestore running on a raw block device -// Block -// - Create a pool in each cluster -// - Mount/unmount a block device through the dynamic provisioner -// File system -// - Create a file system via the CRD -// Object -// - Create the object store via the CRD -// ************************************************************* -func TestCephMultiClusterDeploySuite(t *testing.T) { - if installer.SkipTestSuite(installer.CephTestSuite) { - t.Skip() - } - - s := new(MultiClusterDeploySuite) - defer func(s *MultiClusterDeploySuite) { - HandlePanics(recover(), s.TearDownSuite, s.T) - }(s) - suite.Run(t, s) -} - -type MultiClusterDeploySuite struct { - suite.Suite - testClient *clients.TestClient - k8sh *utils.K8sHelper - settings *installer.TestCephSettings - externalManifests installer.CephManifests - installer *installer.CephInstaller - coreToolbox string - externalToolbox string - poolName string -} - -// Deploy Multiple Rook clusters -func (s *MultiClusterDeploySuite) SetupSuite() { - s.poolName = "multi-cluster-pool1" - coreNamespace := "multi-core" - s.settings = &installer.TestCephSettings{ - ClusterName: "multi-cluster", - Namespace: coreNamespace, - OperatorNamespace: installer.SystemNamespace(coreNamespace), - StorageClassName: "manual", - UsePVC: 
installer.UsePVC(), - Mons: 1, - UseCSI: true, - MultipleMgrs: true, - EnableAdmissionController: true, - RookVersion: installer.VersionMaster, - CephVersion: installer.NautilusVersion, - } - s.settings.ApplyEnvVars() - externalSettings := &installer.TestCephSettings{ - IsExternal: true, - ClusterName: "test-external", - Namespace: "multi-external", - OperatorNamespace: s.settings.OperatorNamespace, - RookVersion: s.settings.RookVersion, - UseCSI: true, - } - externalSettings.ApplyEnvVars() - s.externalManifests = installer.NewCephManifests(externalSettings) - - k8sh, err := utils.CreateK8sHelper(s.T) - assert.NoError(s.T(), err) - if !k8sh.VersionAtLeast("v1.16.0") { - s.T().Skip("requires at least k8s 1.16, no need to run on older versions") - } - - // Start the core storage cluster - s.setupMultiClusterCore() - s.createPools() - - // Start the external cluster that will connect to the core cluster - // create an external cluster - s.startExternalCluster() - - logger.Infof("finished starting clusters") -} - -func (s *MultiClusterDeploySuite) AfterTest(suiteName, testName string) { - s.installer.CollectOperatorLog(suiteName, testName) -} - -func (s *MultiClusterDeploySuite) createPools() { - // create a test pool in each cluster so that we get some PGs - logger.Infof("Creating pool %s", s.poolName) - err := s.testClient.PoolClient.Create(s.poolName, s.settings.Namespace, 1) - require.Nil(s.T(), err) -} - -func (s *MultiClusterDeploySuite) deletePools() { - // create a test pool in each cluster so that we get some PGs - clusterInfo := client.AdminClusterInfo(s.settings.Namespace) - if err := s.testClient.PoolClient.DeletePool(s.testClient.BlockClient, clusterInfo, s.poolName); err != nil { - logger.Errorf("failed to delete pool %q. %v", s.poolName, err) - } else { - logger.Infof("deleted pool %q", s.poolName) - } -} - -func (s *MultiClusterDeploySuite) TearDownSuite() { - s.deletePools() - s.installer.UninstallRookFromMultipleNS(s.externalManifests, s.installer.Manifests) -} - -// Test to make sure all rook components are installed and Running -func (s *MultiClusterDeploySuite) TestInstallingMultipleRookClusters() { - // Check if Rook cluster 1 is deployed successfully - client.RunAllCephCommandsInToolboxPod = s.coreToolbox - checkIfRookClusterIsInstalled(s.Suite, s.k8sh, s.settings.OperatorNamespace, s.settings.Namespace, 1) - checkIfRookClusterIsHealthy(s.Suite, s.testClient, s.settings.Namespace) - - // Check if Rook external cluster is deployed successfully - // Checking health status is enough to validate the connection - client.RunAllCephCommandsInToolboxPod = s.externalToolbox - checkIfRookClusterIsHealthy(s.Suite, s.testClient, s.externalManifests.Settings().Namespace) -} - -// Setup is wrapper for setting up multiple rook clusters. 
-func (s *MultiClusterDeploySuite) setupMultiClusterCore() { - root, err := utils.FindRookRoot() - require.NoError(s.T(), err, "failed to get rook root") - cmdArgs := utils.CommandArgs{Command: filepath.Join(root, localPathPVCmd), - CmdArgs: []string{installer.TestScratchDevice()}} - cmdOut := utils.ExecuteCommand(cmdArgs) - require.NoError(s.T(), cmdOut.Err) - - s.installer, s.k8sh = StartTestCluster(s.T, s.settings, multiClusterMinimalTestVersion) - s.testClient = clients.CreateTestClient(s.k8sh, s.installer.Manifests) - s.coreToolbox = client.RunAllCephCommandsInToolboxPod -} - -func (s *MultiClusterDeploySuite) startExternalCluster() { - err := s.installer.CreateRookExternalCluster(s.externalManifests) - if err != nil { - s.T().Fail() - s.installer.GatherAllRookLogs(s.T().Name(), s.externalManifests.Settings().Namespace) - require.NoError(s.T(), err) - } - - s.externalToolbox = client.RunAllCephCommandsInToolboxPod - logger.Infof("succeeded starting external cluster %s", s.externalManifests.Settings().Namespace) -} diff --git a/tests/integration/ceph_smoke_test.go b/tests/integration/ceph_smoke_test.go deleted file mode 100644 index 950743614..000000000 --- a/tests/integration/ceph_smoke_test.go +++ /dev/null @@ -1,356 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
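setupMultiClusterCore above shells out to tests/scripts/localPathPV.sh through the repo's CommandArgs/ExecuteCommand helpers. A standard-library equivalent of that step, useful only as a mental model of what those helpers do (the name runScript is made up here), is:

package scriptsketch

import (
	"fmt"
	"os/exec"
)

// runScript executes a repo-local shell script with arguments and returns its
// combined stdout/stderr, wrapping any failure with that output for debugging.
func runScript(path string, args ...string) (string, error) {
	out, err := exec.Command(path, args...).CombinedOutput()
	if err != nil {
		return string(out), fmt.Errorf("%s %v failed: %w: %s", path, args, err, out)
	}
	return string(out), nil
}

The real helpers also resolve the repo root first (utils.FindRookRoot) so the script path is absolute regardless of the test's working directory.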
-*/ - -package integration - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/rook/rook/pkg/daemon/ceph/client" - opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ************************************************ -// *** Major scenarios tested by the SmokeSuite *** -// Setup -// - via the cluster CRD -// Monitors -// - Three mons in the cluster -// - Failover of an unhealthy monitor -// OSDs -// - Bluestore running on devices -// Block -// - Mount/unmount a block device through the dynamic provisioner -// - Fencing of the block device -// - Read/write to the device -// File system -// - Create the file system via the CRD -// - Mount/unmount a file system in pod -// - Read/write to the file system -// - Delete the file system -// Object -// - Create the object store via the CRD -// - Create/delete buckets -// - Create/delete users -// - PUT/GET objects -// - Quota limit wrt no of objects -// ************************************************ -func TestCephSmokeSuite(t *testing.T) { - if installer.SkipTestSuite(installer.CephTestSuite) { - t.Skip() - } - - // Skip the suite if CSI is not supported - kh, err := utils.CreateK8sHelper(func() *testing.T { return t }) - require.NoError(t, err) - checkSkipCSITest(t, kh) - - s := new(SmokeSuite) - defer func(s *SmokeSuite) { - HandlePanics(recover(), s.TearDownSuite, s.T) - }(s) - suite.Run(t, s) -} - -type SmokeSuite struct { - suite.Suite - helper *clients.TestClient - settings *installer.TestCephSettings - installer *installer.CephInstaller - k8sh *utils.K8sHelper -} - -func (s *SmokeSuite) SetupSuite() { - namespace := "smoke-ns" - s.settings = &installer.TestCephSettings{ - ClusterName: "smoke-cluster", - Namespace: namespace, - OperatorNamespace: installer.SystemNamespace(namespace), - StorageClassName: installer.StorageClassName(), - UseHelm: false, - UsePVC: installer.UsePVC(), - Mons: 3, - SkipOSDCreation: false, - UseCSI: true, - EnableAdmissionController: true, - UseCrashPruner: true, - RookVersion: installer.VersionMaster, - CephVersion: installer.PacificVersion, - } - s.settings.ApplyEnvVars() - s.installer, s.k8sh = StartTestCluster(s.T, s.settings, smokeSuiteMinimalTestVersion) - if s.k8sh.VersionAtLeast("v1.16.0") { - s.settings.EnableVolumeReplication = true - } - s.helper = clients.CreateTestClient(s.k8sh, s.installer.Manifests) -} - -func (s *SmokeSuite) AfterTest(suiteName, testName string) { - s.installer.CollectOperatorLog(suiteName, testName) -} - -func (s *SmokeSuite) TearDownSuite() { - s.installer.UninstallRook() -} - -func (s *SmokeSuite) TestBlockStorage_SmokeTest() { - runBlockCSITest(s.helper, s.k8sh, s.Suite, s.settings.Namespace) -} - -func (s *SmokeSuite) TestFileStorage_SmokeTest() { - preserveFilesystemOnDelete := true - runFileE2ETest(s.helper, s.k8sh, s.Suite, s.settings, "smoke-test-fs", preserveFilesystemOnDelete) -} - -func (s *SmokeSuite) TestObjectStorage_SmokeTest() { - if utils.IsPlatformOpenShift() { - s.T().Skip("object store tests skipped on openshift") - } - runObjectE2ETest(s.helper, s.k8sh, s.Suite, s.settings.Namespace) -} - -// Test to make sure all rook components are installed and Running -func (s 
*SmokeSuite) TestARookClusterInstallation_SmokeTest() { - checkIfRookClusterIsInstalled(s.Suite, s.k8sh, s.settings.OperatorNamespace, s.settings.Namespace, 3) -} - -// Smoke Test for Mon failover - Test check the following operations for the Mon failover in order -// Delete mon pod, Wait for new mon pod -func (s *SmokeSuite) TestMonFailover() { - ctx := context.TODO() - logger.Infof("Mon Failover Smoke Test") - - deployments, err := s.getNonCanaryMonDeployments() - require.NoError(s.T(), err) - require.Equal(s.T(), 3, len(deployments)) - - monToKill := deployments[0].Name - logger.Infof("Killing mon %s", monToKill) - propagation := metav1.DeletePropagationForeground - delOptions := &metav1.DeleteOptions{PropagationPolicy: &propagation} - err = s.k8sh.Clientset.AppsV1().Deployments(s.settings.Namespace).Delete(ctx, monToKill, *delOptions) - require.NoError(s.T(), err) - - // Wait for the health check to start a new monitor - originalMonDeleted := false - for i := 0; i < 30; i++ { - deployments, err := s.getNonCanaryMonDeployments() - require.NoError(s.T(), err) - - // Make sure the old mon is not still alive - foundOldMon := false - for _, mon := range deployments { - if mon.Name == monToKill { - foundOldMon = true - } - } - - // Check if we have three monitors - if foundOldMon { - if originalMonDeleted { - // Depending on the state of the orchestration, the operator might trigger - // re-creation of the deleted mon. In this case, consider the test successful - // rather than wait for the failover which will never occur. - logger.Infof("Original mon created again, no need to wait for mon failover") - return - } - logger.Infof("Waiting for old monitor to stop") - } else { - logger.Infof("Waiting for a new monitor to start") - originalMonDeleted = true - if len(deployments) == 3 { - var newMons []string - for _, mon := range deployments { - newMons = append(newMons, mon.Name) - } - logger.Infof("Found a new monitor! 
monitors=%v", newMons) - return - } - - assert.Equal(s.T(), 2, len(deployments)) - } - - time.Sleep(5 * time.Second) - } - - require.Fail(s.T(), "giving up waiting for a new monitor") -} - -// Smoke Test for pool Resizing -func (s *SmokeSuite) TestPoolResize() { - ctx := context.TODO() - logger.Infof("Pool Resize Smoke Test") - - poolName := "testpool" - err := s.helper.PoolClient.Create(poolName, s.settings.Namespace, 1) - require.NoError(s.T(), err) - - poolFound := false - clusterInfo := client.AdminClusterInfo(s.settings.Namespace) - - // Wait for pool to appear - for i := 0; i < 10; i++ { - pools, err := s.helper.PoolClient.ListCephPools(clusterInfo) - require.NoError(s.T(), err) - for _, p := range pools { - if p.Name != poolName { - continue - } - poolFound = true - } - if poolFound { - break - } - - logger.Infof("Waiting for pool to appear") - time.Sleep(2 * time.Second) - } - - require.Equal(s.T(), true, poolFound, "pool not found") - - err = s.helper.PoolClient.Update(poolName, s.settings.Namespace, 2) - require.NoError(s.T(), err) - - poolResized := false - // Wait for pool resize to happen - for i := 0; i < 10; i++ { - details, err := s.helper.PoolClient.GetCephPoolDetails(clusterInfo, poolName) - require.NoError(s.T(), err) - if details.Size > 1 { - logger.Infof("pool %s size was updated", poolName) - require.Equal(s.T(), 2, int(details.Size)) - poolResized = true - - // resize the pool back to 1 to avoid hangs around not having enough OSDs to satisfy rbd - err = s.helper.PoolClient.Update(poolName, s.settings.Namespace, 1) - require.NoError(s.T(), err) - } else if poolResized && details.Size == 1 { - logger.Infof("pool resized back to 1") - break - } - - logger.Debugf("pool %s size not updated yet. details: %+v", poolName, details) - logger.Infof("Waiting for pool %s resize to happen", poolName) - time.Sleep(2 * time.Second) - } - - require.Equal(s.T(), true, poolResized, fmt.Sprintf("pool %s not found", poolName)) - - // Verify the Kubernetes Secret has been created (bootstrap peer token) - pool, err := s.k8sh.RookClientset.CephV1().CephBlockPools(s.settings.Namespace).Get(ctx, poolName, metav1.GetOptions{}) - assert.NoError(s.T(), err) - if pool.Spec.Mirroring.Enabled { - secretName := pool.Status.Info[opcontroller.RBDMirrorBootstrapPeerSecretName] - assert.NotEmpty(s.T(), secretName) - // now fetch the secret which contains the bootstrap peer token - secret, err := s.k8sh.Clientset.CoreV1().Secrets(s.settings.Namespace).Get(ctx, secretName, metav1.GetOptions{}) - require.NoError(s.T(), err) - assert.NotEmpty(s.T(), secret.Data["token"]) - } - - // clean up the pool - err = s.helper.PoolClient.DeletePool(s.helper.BlockClient, clusterInfo, poolName) - assert.NoError(s.T(), err) -} - -// Smoke Test for Client CRD -func (s *SmokeSuite) TestCreateClient() { - logger.Infof("Create Client Smoke Test") - - clientName := "client1" - caps := map[string]string{ - "mon": "allow rwx", - "mgr": "allow rwx", - "osd": "allow rwx", - } - clusterInfo := client.AdminClusterInfo(s.settings.Namespace) - err := s.helper.UserClient.Create(clientName, s.settings.Namespace, caps) - require.NoError(s.T(), err) - - clientFound := false - - for i := 0; i < 30; i++ { - clients, _ := s.helper.UserClient.Get(clusterInfo, "client."+clientName) - if clients != "" { - clientFound = true - } - - if clientFound { - break - } - - logger.Infof("Waiting for client to appear") - time.Sleep(2 * time.Second) - } - - assert.Equal(s.T(), true, clientFound, "client not found") - - logger.Infof("Update Client Smoke 
Test") - newcaps := map[string]string{ - "mon": "allow r", - "mgr": "allow rw", - "osd": "allow *", - } - caps, _ = s.helper.UserClient.Update(clusterInfo, clientName, newcaps) - - assert.Equal(s.T(), "allow r", caps["mon"], "wrong caps") - assert.Equal(s.T(), "allow rw", caps["mgr"], "wrong caps") - assert.Equal(s.T(), "allow *", caps["osd"], "wrong caps") - - err = s.helper.UserClient.Delete(clientName, s.settings.Namespace) - require.NoError(s.T(), err) -} - -// Smoke Test for RBD Mirror CRD -func (s *SmokeSuite) TestCreateRBDMirrorClient() { - logger.Infof("Create rbd-mirror Smoke Test") - - rbdMirrorName := "my-rbd-mirror" - - err := s.helper.RBDMirrorClient.Create(s.settings.Namespace, rbdMirrorName, 1) - require.NoError(s.T(), err) - - err = s.helper.RBDMirrorClient.Delete(s.settings.Namespace, rbdMirrorName) - require.NoError(s.T(), err) -} - -func (s *SmokeSuite) getNonCanaryMonDeployments() ([]appsv1.Deployment, error) { - ctx := context.TODO() - opts := metav1.ListOptions{LabelSelector: "app=rook-ceph-mon"} - deployments, err := s.k8sh.Clientset.AppsV1().Deployments(s.settings.Namespace).List(ctx, opts) - if err != nil { - return nil, err - } - nonCanaryMonDeployments := []appsv1.Deployment{} - for _, deployment := range deployments.Items { - if !strings.HasSuffix(deployment.GetName(), "-canary") { - nonCanaryMonDeployments = append(nonCanaryMonDeployments, deployment) - } - } - return nonCanaryMonDeployments, nil -} diff --git a/tests/integration/ceph_upgrade_test.go b/tests/integration/ceph_upgrade_test.go deleted file mode 100644 index e7f2a0c88..000000000 --- a/tests/integration/ceph_upgrade_test.go +++ /dev/null @@ -1,373 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package integration - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -const ( - rbdPodName = "test-pod-upgrade" - operatorContainer = "rook-ceph-operator" -) - -// ************************************************ -// *** Major scenarios tested by the UpgradeSuite *** -// Setup -// - Initially create a cluster from the previous minor release -// - Upgrade to the current build of Rook to verify functionality after upgrade -// - Test basic usage of block, object, and file after upgrade -// Monitors -// - One mon in the cluster -// ************************************************ -func TestCephUpgradeSuite(t *testing.T) { - if installer.SkipTestSuite(installer.CephTestSuite) { - t.Skip() - } - - // Skip the suite if CSI is not supported - kh, err := utils.CreateK8sHelper(func() *testing.T { return t }) - require.NoError(t, err) - checkSkipCSITest(t, kh) - - s := new(UpgradeSuite) - defer func(s *UpgradeSuite) { - HandlePanics(recover(), s.TearDownSuite, s.T) - }(s) - suite.Run(t, s) -} - -type UpgradeSuite struct { - suite.Suite - helper *clients.TestClient - k8sh *utils.K8sHelper - settings *installer.TestCephSettings - installer *installer.CephInstaller - namespace string -} - -func (s *UpgradeSuite) SetupSuite() { - s.namespace = "upgrade-ns" - s.settings = &installer.TestCephSettings{ - ClusterName: s.namespace, - Namespace: s.namespace, - OperatorNamespace: installer.SystemNamespace(s.namespace), - StorageClassName: "", - UseHelm: false, - UsePVC: false, - Mons: 1, - UseCSI: true, - SkipOSDCreation: false, - RookVersion: installer.Version1_6, - CephVersion: installer.NautilusPartitionVersion, - } - - s.installer, s.k8sh = StartTestCluster(s.T, s.settings, upgradeMinimalTestVersion) - s.helper = clients.CreateTestClient(s.k8sh, s.installer.Manifests) -} - -func (s *UpgradeSuite) TearDownSuite() { - s.installer.UninstallRook() -} - -func (s *UpgradeSuite) TestUpgradeToMaster() { - // - // Create block, object, and file storage before the upgrade - // - poolName := "upgradepool" - storageClassName := "block-upgrade" - blockName := "block-claim-upgrade" - logger.Infof("Initializing block before the upgrade") - clusterInfo := client.AdminClusterInfo(s.namespace) - setupBlockLite(s.helper, s.k8sh, s.Suite, clusterInfo, poolName, storageClassName, blockName, rbdPodName) - - createPodWithBlock(s.helper, s.k8sh, s.Suite, s.namespace, storageClassName, rbdPodName, blockName) - - // FIX: We should require block images to be removed. 
See tracking issue: - // - requireBlockImagesRemoved := false - defer blockTestDataCleanUp(s.helper, s.k8sh, s.Suite, clusterInfo, poolName, storageClassName, blockName, rbdPodName, requireBlockImagesRemoved) - - // Create the filesystem - logger.Infof("Initializing file before the upgrade") - filesystemName := "upgrade-test-fs" - activeCount := 1 - createFilesystem(s.helper, s.k8sh, s.Suite, s.settings, filesystemName, activeCount) - - // Start the file test client - fsStorageClass := "file-upgrade" - assert.NoError(s.T(), s.helper.FSClient.CreateStorageClass(filesystemName, s.settings.OperatorNamespace, s.namespace, fsStorageClass)) - createFilesystemConsumerPod(s.helper, s.k8sh, s.Suite, s.settings, filesystemName, fsStorageClass) - defer func() { - cleanupFilesystemConsumer(s.helper, s.k8sh, s.Suite, s.namespace, filePodName) - cleanupFilesystem(s.helper, s.k8sh, s.Suite, s.namespace, filesystemName) - }() - - logger.Infof("Initializing object before the upgrade") - objectStoreName := "upgraded-object" - runObjectE2ETestLite(s.helper, s.k8sh, s.Suite, s.settings, objectStoreName, 1, false) - - logger.Infof("Initializing object user before the upgrade") - objectUserID := "upgraded-user" - createCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, false) - - logger.Info("Initializing object bucket claim before the upgrade") - bucketStorageClassName := "rook-smoke-delete-bucket" - bucketPrefix := "generate-me" // use generated bucket name for this test - cobErr := s.helper.BucketClient.CreateBucketStorageClass(s.namespace, objectStoreName, bucketStorageClassName, "Delete", region) - require.Nil(s.T(), cobErr) - cobcErr := s.helper.BucketClient.CreateObc(obcName, bucketStorageClassName, bucketPrefix, maxObject, false) - require.Nil(s.T(), cobcErr) - defer func() { - _ = s.helper.ObjectUserClient.Delete(s.namespace, objectUserID) - _ = s.helper.BucketClient.DeleteObc(obcName, bucketStorageClassName, bucketPrefix, maxObject, false) - _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, objectStoreName, bucketStorageClassName, "Delete", region) - objectStoreCleanUp(s.Suite, s.helper, s.k8sh, s.settings.Namespace, objectStoreName) - }() - - created := utils.Retry(12, 2*time.Second, "OBC is created", func() bool { - // do not check if bound here b/c this fails in Rook v1.4 - return s.helper.BucketClient.CheckOBC(obcName, "created") - }) - require.True(s.T(), created) - - // verify that we're actually running the right pre-upgrade image - s.verifyOperatorImage(installer.Version1_6) - - message := "my simple message" - preFilename := "pre-upgrade-file" - assert.NoError(s.T(), s.k8sh.WriteToPod("", rbdPodName, preFilename, message)) - assert.NoError(s.T(), s.k8sh.ReadFromPod("", rbdPodName, preFilename, message)) - - // we will keep appending to this to continue verifying old files through the upgrades - rbdFilesToRead := []string{preFilename} - cephfsFilesToRead := []string{} - - // Get some info about the currently deployed OSDs to determine later if they are all updated - osdDepList, err := k8sutil.GetDeployments(s.k8sh.Clientset, s.namespace, "app=rook-ceph-osd") - require.NoError(s.T(), err) - osdDeps := osdDepList.Items - numOSDs := len(osdDeps) // there should be this many upgraded OSDs - require.NotEqual(s.T(), 0, numOSDs) - - // - // Upgrade Rook from v1.6 to master - // - logger.Infof("*** UPGRADING ROOK FROM %s to master ***", installer.Version1_6) - s.gatherLogs(s.settings.OperatorNamespace, "_before_master_upgrade") - s.upgradeToMaster() - - 
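For orientation: the upgrade this suite drives boils down to re-applying the latest CRDs and common resources and bumping the operator image, with the Ceph image bumped separately by patching the CephCluster CR (as `upgradeCephVersion` does below). A rough shell sketch of the same steps; the manifest paths, namespaces, and image tags are assumptions, not values taken from the suite:

```bash
#!/usr/bin/env bash
# Illustrative sketch only; manifest paths, namespaces and image tags are placeholders.
set -euo pipefail

OPERATOR_NS="rook-ceph"   # the suite uses installer.SystemNamespace(<test namespace>)
CLUSTER_NS="rook-ceph"    # the suite uses its own test namespace

# 1. Re-apply the CRDs and common resources for the target Rook version
kubectl apply -f deploy/examples/crds.yaml -f deploy/examples/common.yaml

# 2. Bump the operator image (SetDeploymentVersion in the test does the equivalent)
kubectl -n "${OPERATOR_NS}" set image deploy/rook-ceph-operator \
  rook-ceph-operator=rook/ceph:master

# 3. Upgrade Ceph itself by patching the CephCluster CR, as upgradeCephVersion does
kubectl -n "${CLUSTER_NS}" patch cephcluster "${CLUSTER_NS}" --type merge \
  -p '{"spec":{"cephVersion":{"image":"quay.io/ceph/ceph:v16"}}}'
```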
s.verifyOperatorImage(installer.VersionMaster) - s.verifyRookUpgrade(numOSDs) - err = s.installer.WaitForToolbox(s.namespace) - assert.NoError(s.T(), err) - - logger.Infof("Done with automatic upgrade from %s to master", installer.Version1_6) - newFile := "post-upgrade-1_6-to-master-file" - s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) - rbdFilesToRead = append(rbdFilesToRead, newFile) - cephfsFilesToRead = append(cephfsFilesToRead, newFile) - - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true) - - // should be Bound after upgrade to Rook master - // do not need retry b/c the OBC controller runs parallel to Rook-Ceph orchestration - assert.True(s.T(), s.helper.BucketClient.CheckOBC(obcName, "bound")) - - logger.Infof("Verified upgrade from %s to master", installer.Version1_6) - - // - // Upgrade from nautilus to octopus - // - logger.Infof("*** UPGRADING CEPH FROM Nautilus TO Octopus ***") - s.gatherLogs(s.settings.OperatorNamespace, "_before_octopus_upgrade") - s.upgradeCephVersion(installer.OctopusVersion.Image, numOSDs) - // Verify reading and writing to the test clients - newFile = "post-octopus-upgrade-file" - s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) - logger.Infof("Verified upgrade from nautilus to octopus") - - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true) - - // - // Upgrade from octopus to pacific - // - logger.Infof("*** UPGRADING CEPH FROM OCTOPUS TO PACIFIC ***") - s.gatherLogs(s.settings.OperatorNamespace, "_before_pacific_upgrade") - s.upgradeCephVersion(installer.PacificVersion.Image, numOSDs) - // Verify reading and writing to the test clients - newFile = "post-pacific-upgrade-file" - s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) - logger.Infof("Verified upgrade from octopus to pacific") - - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true) -} - -func (s *UpgradeSuite) gatherLogs(systemNamespace, testSuffix string) { - // Gather logs before Ceph upgrade to help with debugging - if installer.TestLogCollectionLevel() == "all" { - s.k8sh.PrintPodDescribe(s.namespace) - } - n := strings.Replace(s.T().Name(), "/", "_", -1) + testSuffix - s.installer.GatherAllRookLogs(n, systemNamespace, s.namespace) -} - -func (s *UpgradeSuite) upgradeCephVersion(newCephImage string, numOSDs int) { - osdDepList, err := k8sutil.GetDeployments(s.k8sh.Clientset, s.namespace, "app=rook-ceph-osd") - require.NoError(s.T(), err) - oldCephVersion := osdDepList.Items[0].Labels["ceph-version"] // upgraded OSDs should not have this version label - - _, err = s.k8sh.Kubectl("-n", s.namespace, "patch", "CephCluster", s.namespace, "--type=merge", - "-p", fmt.Sprintf(`{"spec": {"cephVersion": {"image": "%s"}}}`, newCephImage)) - - assert.NoError(s.T(), err) - s.waitForUpgradedDaemons(oldCephVersion, "ceph-version", numOSDs, false) -} - -func (s *UpgradeSuite) verifyOperatorImage(expectedImage string) { - systemNamespace := installer.SystemNamespace(s.namespace) - - // verify that the operator spec is updated - version, err := k8sutil.GetDeploymentImage(s.k8sh.Clientset, systemNamespace, operatorContainer, operatorContainer) - assert.NoError(s.T(), err) - assert.Equal(s.T(), "rook/ceph:"+expectedImage, version) -} - -func (s *UpgradeSuite) verifyRookUpgrade(numOSDs int) { - // Get some info about the currently deployed 
mons to determine later if they are all updated - monDepList, err := k8sutil.GetDeployments(s.k8sh.Clientset, s.namespace, "app=rook-ceph-mon") - require.NoError(s.T(), err) - require.Equal(s.T(), s.settings.Mons, len(monDepList.Items), monDepList.Items) - - // Get some info about the currently deployed mgr to determine later if it is updated - mgrDepList, err := k8sutil.GetDeployments(s.k8sh.Clientset, s.namespace, "app=rook-ceph-mgr") - require.NoError(s.T(), err) - require.Equal(s.T(), 1, len(mgrDepList.Items)) - - // Get some info about the currently deployed OSDs to determine later if they are all updated - osdDepList, err := k8sutil.GetDeployments(s.k8sh.Clientset, s.namespace, "app=rook-ceph-osd") - require.NoError(s.T(), err) - require.NotZero(s.T(), len(osdDepList.Items)) - require.Equal(s.T(), numOSDs, len(osdDepList.Items), osdDepList.Items) - - d := osdDepList.Items[0] - oldRookVersion := d.Labels["rook-version"] // upgraded OSDs should not have this version label - - s.waitForUpgradedDaemons(oldRookVersion, "rook-version", numOSDs, true) -} - -func (s *UpgradeSuite) waitForUpgradedDaemons(previousVersion, versionLabel string, numOSDs int, waitForMDS bool) { - // wait for the mon(s) to be updated - monsNotOldVersion := fmt.Sprintf("app=rook-ceph-mon,%s!=%s", versionLabel, previousVersion) - err := s.k8sh.WaitForDeploymentCount(monsNotOldVersion, s.namespace, s.settings.Mons) - require.NoError(s.T(), err, "mon(s) didn't update") - err = s.k8sh.WaitForLabeledDeploymentsToBeReady(monsNotOldVersion, s.namespace) - require.NoError(s.T(), err) - - // wait for the mgr to be updated - mgrNotOldVersion := fmt.Sprintf("app=rook-ceph-mgr,%s!=%s", versionLabel, previousVersion) - err = s.k8sh.WaitForDeploymentCount(mgrNotOldVersion, s.namespace, 1) - require.NoError(s.T(), err, "mgr didn't update") - err = s.k8sh.WaitForLabeledDeploymentsToBeReady(mgrNotOldVersion, s.namespace) - require.NoError(s.T(), err) - - // wait for the osd pods to be updated - osdsNotOldVersion := fmt.Sprintf("app=rook-ceph-osd,%s!=%s", versionLabel, previousVersion) - err = s.k8sh.WaitForDeploymentCount(osdsNotOldVersion, s.namespace, numOSDs) - require.NoError(s.T(), err, "osd(s) didn't update") - err = s.k8sh.WaitForLabeledDeploymentsToBeReady(osdsNotOldVersion, s.namespace) - require.NoError(s.T(), err) - - // wait for the mds pods to be updated - // FIX: In v1.2 there was a race condition that can cause the MDS to not be updated, so we skip - // the check for MDS upgrade in case it's just a ceph upgrade (no operator restart) - if waitForMDS { - mdsesNotOldVersion := fmt.Sprintf("app=rook-ceph-mds,%s!=%s", versionLabel, previousVersion) - err = s.k8sh.WaitForDeploymentCount(mdsesNotOldVersion, s.namespace, 2 /* always expect 2 mdses */) - require.NoError(s.T(), err) - err = s.k8sh.WaitForLabeledDeploymentsToBeReady(mdsesNotOldVersion, s.namespace) - require.NoError(s.T(), err) - } - - rgwsNotOldVersion := fmt.Sprintf("app=rook-ceph-rgw,%s!=%s", versionLabel, previousVersion) - err = s.k8sh.WaitForDeploymentCount(rgwsNotOldVersion, s.namespace, 1 /* always expect 1 rgw */) - require.NoError(s.T(), err) - err = s.k8sh.WaitForLabeledDeploymentsToBeReady(rgwsNotOldVersion, s.namespace) - require.NoError(s.T(), err) - - // Give a few seconds for the daemons to settle down after the upgrade - time.Sleep(5 * time.Second) -} - -func (s *UpgradeSuite) verifyFilesAfterUpgrade(fsName, newFileToWrite, messageForAllFiles string, rbdFilesToRead, cephFSFilesToRead []string) { - retryCount := 5 - - for _, file := range 
rbdFilesToRead { - // test reading preexisting files in the pod with rbd mounted - // There is some unreliability right after the upgrade when there is only one osd, so we will retry if needed - assert.NoError(s.T(), s.k8sh.ReadFromPodRetry("", rbdPodName, file, messageForAllFiles, retryCount)) - } - - // test writing and reading a new file in the pod with rbd mounted - assert.NoError(s.T(), s.k8sh.WriteToPodRetry("", rbdPodName, newFileToWrite, messageForAllFiles, retryCount)) - assert.NoError(s.T(), s.k8sh.ReadFromPodRetry("", rbdPodName, newFileToWrite, messageForAllFiles, retryCount)) - - if fsName != "" { - // wait for filesystem to be active - clusterInfo := client.AdminClusterInfo(s.namespace) - err := waitForFilesystemActive(s.k8sh, clusterInfo, fsName) - require.NoError(s.T(), err) - - // test reading preexisting files in the pod with cephfs mounted - for _, file := range cephFSFilesToRead { - assert.NoError(s.T(), s.k8sh.ReadFromPodRetry(s.namespace, filePodName, file, messageForAllFiles, retryCount)) - } - - // test writing and reading a new file in the pod with cephfs mounted - assert.NoError(s.T(), s.k8sh.WriteToPodRetry(s.namespace, filePodName, newFileToWrite, messageForAllFiles, retryCount)) - assert.NoError(s.T(), s.k8sh.ReadFromPodRetry(s.namespace, filePodName, newFileToWrite, messageForAllFiles, retryCount)) - } -} - -// UpgradeToMaster performs the steps necessary to upgrade a Rook v1.4 cluster to master. It does not -// verify the upgrade but merely starts the upgrade process. -func (s *UpgradeSuite) upgradeToMaster() { - // Apply the CRDs for the latest master - s.settings.RookVersion = installer.VersionMaster - m := installer.NewCephManifests(s.settings) - require.NoError(s.T(), s.k8sh.ResourceOperation("apply", m.GetCRDs(s.k8sh))) - - require.NoError(s.T(), s.k8sh.ResourceOperation("apply", m.GetCommon())) - - require.NoError(s.T(), - s.k8sh.SetDeploymentVersion(s.settings.OperatorNamespace, operatorContainer, operatorContainer, installer.VersionMaster)) - - require.NoError(s.T(), - s.k8sh.SetDeploymentVersion(s.settings.Namespace, "rook-ceph-tools", "rook-ceph-tools", installer.VersionMaster)) -} diff --git a/pkg/operator/ceph/test/info.go b/tests/integration/integration.go similarity index 79% rename from pkg/operator/ceph/test/info.go rename to tests/integration/integration.go index 93d3d6d4d..30c2e26c2 100644 --- a/pkg/operator/ceph/test/info.go +++ b/tests/integration/integration.go @@ -14,5 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package test for the operator tests -package test +package integration + +import "github.com/coreos/pkg/capnslog" + +var ( + logger = capnslog.NewPackageLogger("github.com/rook/cassandra", "integrationTest") +) diff --git a/tests/integration/nfs_test.go b/tests/integration/nfs_test.go deleted file mode 100644 index 9dc6e33fd..000000000 --- a/tests/integration/nfs_test.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "fmt" - "testing" - "time" - - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "k8s.io/apimachinery/pkg/util/version" -) - -// ******************************************************* -// *** Major scenarios tested by the NfsSuite *** -// Setup -// - via the server CRD with very simple properties -// - 1 replica -// - Default server permissions -// - Mount a NFS export and write data to it and verify -// ******************************************************* -func TestNfsSuite(t *testing.T) { - if installer.SkipTestSuite(installer.NFSTestSuite) { - t.Skip() - } - - s := new(NfsSuite) - defer func(s *NfsSuite) { - HandlePanics(recover(), s.Teardown, s.T) - }(s) - suite.Run(t, s) -} - -type NfsSuite struct { - suite.Suite - k8shelper *utils.K8sHelper - installer *installer.NFSInstaller - rwClient *clients.ReadWriteOperation - namespace string - systemNamespace string - instanceCount int -} - -func (s *NfsSuite) SetupSuite() { - s.Setup() -} - -func (s *NfsSuite) TearDownSuite() { - s.Teardown() -} - -func (s *NfsSuite) Setup() { - s.namespace = "rook-nfs" - s.systemNamespace = installer.SystemNamespace(s.namespace) - s.instanceCount = 1 - - k8shelper, err := utils.CreateK8sHelper(s.T) - v := version.MustParseSemantic(k8shelper.GetK8sServerVersion()) - if !v.AtLeast(version.MustParseSemantic("1.14.0")) { - logger.Info("Skipping NFS tests when not at least K8s v1.14") - s.T().Skip() - } - - require.NoError(s.T(), err) - s.k8shelper = k8shelper - - k8sversion := s.k8shelper.GetK8sServerVersion() - logger.Infof("Installing nfs server on k8s %s", k8sversion) - - s.installer = installer.NewNFSInstaller(s.k8shelper, s.T) - - s.rwClient = clients.CreateReadWriteOperation(s.k8shelper) - - err = s.installer.InstallNFSServer(s.systemNamespace, s.namespace, s.instanceCount) - if err != nil { - logger.Errorf("nfs server installation failed: %+v", err) - s.T().Fail() - s.Teardown() - s.T().FailNow() - } -} - -func (s *NfsSuite) Teardown() { - s.installer.GatherAllNFSServerLogs(s.systemNamespace, s.namespace, s.T().Name()) - s.installer.UninstallNFSServer(s.systemNamespace, s.namespace) -} - -func (s *NfsSuite) TestNfsServerInstallation() { - logger.Infof("Verifying that nfs server pod %s is running", s.namespace) - - // verify nfs server operator is running OK - assert.True(s.T(), s.k8shelper.CheckPodCountAndState("rook-nfs-operator", s.systemNamespace, 1, "Running"), - "1 rook-nfs-operator must be in Running state") - - // verify nfs server instances are running OK - assert.True(s.T(), s.k8shelper.CheckPodCountAndState(s.namespace, s.namespace, s.instanceCount, "Running"), - fmt.Sprintf("%d rook-nfs pods must be in Running state", s.instanceCount)) - - // verify bigger export is running OK - assert.True(s.T(), true, s.k8shelper.WaitUntilPVCIsBound("default", "nfs-pv-claim-bigger")) - - podList, err := s.rwClient.CreateWriteClient("nfs-pv-claim-bigger") - require.NoError(s.T(), err) - assert.True(s.T(), true, s.checkReadData(podList)) - err = s.rwClient.Delete() - assert.NoError(s.T(), err) - - // verify another smaller export is running OK - assert.True(s.T(), true, s.k8shelper.WaitUntilPVCIsBound("default", "nfs-pv-claim")) - - defer 
s.rwClient.Delete() //nolint // delete a nfs consuming pod in rook - podList, err = s.rwClient.CreateWriteClient("nfs-pv-claim") - require.NoError(s.T(), err) - assert.True(s.T(), true, s.checkReadData(podList)) -} - -func (s *NfsSuite) checkReadData(podList []string) bool { - var result string - var err error - // the following for loop retries to read data from the first pod in the pod list - for i := 0; i < utils.RetryLoop; i++ { - // the nfs volume is mounted on "/mnt" and the data(hostname of the pod) is written in "/mnt/data" of the pod - // results stores the hostname of either one of the pod which is same as the pod name, which is read from "/mnt/data" - result, err = s.rwClient.Read(podList[0]) - logger.Infof("nfs volume read exited, err: %+v. result: %s", err, result) - if err == nil { - break - } - logger.Warning("nfs volume read failed, will try again") - time.Sleep(utils.RetryInterval * time.Second) - } - require.NoError(s.T(), err) - // the value of result must be same as the name of pod. - if result == podList[0] || result == podList[1] { - return true - } - - return false -} diff --git a/tests/integration/z_cassandra_test.go b/tests/integration/z_cassandra_test.go index f0997b11e..9f5aaaea4 100644 --- a/tests/integration/z_cassandra_test.go +++ b/tests/integration/z_cassandra_test.go @@ -22,9 +22,9 @@ import ( "testing" "time" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" + cassandrav1alpha1 "github.com/rook/cassandra/pkg/apis/cassandra.rook.io/v1alpha1" + "github.com/rook/cassandra/tests/framework/installer" + "github.com/rook/cassandra/tests/framework/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" diff --git a/tests/manifests/test-cluster-on-pvc-encrypted.yaml b/tests/manifests/test-cluster-on-pvc-encrypted.yaml deleted file mode 100644 index a1b71a127..000000000 --- a/tests/manifests/test-cluster-on-pvc-encrypted.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: rook-ceph - namespace: rook-ceph -spec: - dataDirHostPath: /var/lib/rook - mon: - count: 1 - volumeClaimTemplate: - spec: - storageClassName: manual - resources: - requests: - storage: 5Gi - cephVersion: - image: quay.io/ceph/ceph:v15 - dashboard: - enabled: false - network: - hostNetwork: false - crashCollector: - disable: true - storage: - storageClassDeviceSets: - - name: set1 - count: 1 - portable: false - tuneDeviceClass: false - encrypted: true - volumeClaimTemplates: - - metadata: - name: data - spec: - resources: - requests: - storage: 10Gi - storageClassName: manual - volumeMode: Block - accessModes: - - ReadWriteOnce diff --git a/tests/manifests/test-fs-mirror-spec.yaml b/tests/manifests/test-fs-mirror-spec.yaml deleted file mode 100644 index 061fa43f2..000000000 --- a/tests/manifests/test-fs-mirror-spec.yaml +++ /dev/null @@ -1,9 +0,0 @@ -spec: - mirroring: - enabled: true - snapshotSchedules: - - path: / - interval: 24h - snapshotRetention: - - path: / - duration: "h 24" diff --git a/tests/manifests/test-kms-vault-spec.yaml b/tests/manifests/test-kms-vault-spec.yaml deleted file mode 100644 index d9541f960..000000000 --- a/tests/manifests/test-kms-vault-spec.yaml +++ /dev/null @@ -1,10 +0,0 @@ -spec: - security: - kms: - connectionDetails: - KMS_PROVIDER: vault - VAULT_ADDR: https://vault.default.svc.cluster.local:8200 - VAULT_BACKEND_PATH: rook/ver1 
- VAULT_SECRET_ENGINE: kv - VAULT_SKIP_VERIFY: "true" - tokenSecretName: rook-vault-token diff --git a/tests/manifests/test-kms-vault.yaml b/tests/manifests/test-kms-vault.yaml deleted file mode 100644 index 108e649bf..000000000 --- a/tests/manifests/test-kms-vault.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: rook-vault-token - namespace: rook-ceph -data: - token: ROOK_TOKEN \ No newline at end of file diff --git a/tests/manifests/test-object.yaml b/tests/manifests/test-object.yaml deleted file mode 100644 index d53b5f346..000000000 --- a/tests/manifests/test-object.yaml +++ /dev/null @@ -1,22 +0,0 @@ -################################################################################################################# -# Create an object store with settings for a test environment. Only a single OSD is required in this example. -# kubectl create -f object-test.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephObjectStore -metadata: - name: my-store - namespace: rook-ceph # namespace:cluster -spec: - metadataPool: - replicated: - size: 1 - dataPool: - replicated: - size: 1 - preservePoolsOnDelete: false - gateway: - port: 80 - # securePort: 443 - instances: 1 diff --git a/tests/manifests/test-on-pvc-db.yaml b/tests/manifests/test-on-pvc-db.yaml deleted file mode 100644 index 0a7a569ed..000000000 --- a/tests/manifests/test-on-pvc-db.yaml +++ /dev/null @@ -1,10 +0,0 @@ - - metadata: - name: metadata - spec: - resources: - requests: - storage: 2Gi - storageClassName: manual - volumeMode: Block - accessModes: - - ReadWriteOnce diff --git a/tests/manifests/test-on-pvc-wal.yaml b/tests/manifests/test-on-pvc-wal.yaml deleted file mode 100644 index 2b91a2ebc..000000000 --- a/tests/manifests/test-on-pvc-wal.yaml +++ /dev/null @@ -1,10 +0,0 @@ - - metadata: - name: wal - spec: - resources: - requests: - storage: 2Gi - storageClassName: manual - volumeMode: Block - accessModes: - - ReadWriteOnce diff --git a/tests/scripts/deploy-validate-vault.sh b/tests/scripts/deploy-validate-vault.sh index fc3809652..3e54399e6 100755 --- a/tests/scripts/deploy-validate-vault.sh +++ b/tests/scripts/deploy-validate-vault.sh @@ -30,59 +30,13 @@ if [[ "$(uname)" == "Linux" ]]; then install_helm fi -function generate_vault_tls_config { - openssl genrsa -out "${TMPDIR}"/vault.key 2048 - - cat <"${TMPDIR}"/csr.conf -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[req_distinguished_name] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -extendedKeyUsage = serverAuth -subjectAltName = @alt_names -[alt_names] -DNS.1 = ${SERVICE} -DNS.2 = ${SERVICE}.${NAMESPACE} -DNS.3 = ${SERVICE}.${NAMESPACE}.svc -DNS.4 = ${SERVICE}.${NAMESPACE}.svc.cluster.local -IP.1 = 127.0.0.1 -EOF - - openssl req -new -key "${TMPDIR}"/vault.key -subj "/CN=${SERVICE}.${NAMESPACE}.svc" -out "${TMPDIR}"/server.csr -config "${TMPDIR}"/csr.conf - - export CSR_NAME=vault-csr - - cat <"${TMPDIR}"/csr.yaml -apiVersion: certificates.k8s.io/v1beta1 -kind: CertificateSigningRequest -metadata: - name: ${CSR_NAME} -spec: - groups: - - system:authenticated - request: $(cat ${TMPDIR}/server.csr | base64 | tr -d '\n') - usages: - - digital signature - - key encipherment - - server auth -EOF - - kubectl create -f "${TMPDIR}/"csr.yaml - - kubectl certificate approve ${CSR_NAME} - - serverCert=$(kubectl get csr ${CSR_NAME} -o 
jsonpath='{.status.certificate}') - echo "${serverCert}" | openssl base64 -d -A -out "${TMPDIR}"/vault.crt - kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | base64 -d > "${TMPDIR}"/vault.ca +function create_secret_generic { kubectl create secret generic ${SECRET_NAME} \ --namespace ${NAMESPACE} \ --from-file=vault.key="${TMPDIR}"/vault.key \ --from-file=vault.crt="${TMPDIR}"/vault.crt \ --from-file=vault.ca="${TMPDIR}"/vault.ca - + # for rook kubectl create secret generic vault-ca-cert --namespace ${ROOK_NAMESPACE} --from-file=cert="${TMPDIR}"/vault.ca kubectl create secret generic vault-client-cert --namespace ${ROOK_NAMESPACE} --from-file=cert="${TMPDIR}"/vault.crt @@ -90,7 +44,7 @@ EOF } function vault_helm_tls { - + cat <"${TMPDIR}/"custom-values.yaml global: enabled: true @@ -119,19 +73,21 @@ server: path = "/vault/data" } EOF - + } function deploy_vault { # TLS config - generate_vault_tls_config + scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + bash "${scriptdir}"/generate-tls-config.sh "${TMPDIR}" ${SERVICE} ${NAMESPACE} + create_secret_generic vault_helm_tls - + # Install Vault with Helm helm repo add hashicorp https://helm.releases.hashicorp.com helm install vault hashicorp/vault --values "${TMPDIR}/"custom-values.yaml timeout 120 sh -c 'until kubectl get pods -l app.kubernetes.io/name=vault --field-selector=status.phase=Running|grep vault-0; do sleep 5; done' - + # Unseal Vault VAULT_INIT_TEMP_DIR=$(mktemp) kubectl exec -ti vault-0 -- vault operator init -format "json" -ca-cert /vault/userconfig/vault-server-tls/vault.crt | tee -a "$VAULT_INIT_TEMP_DIR" @@ -139,10 +95,10 @@ function deploy_vault { kubectl exec -ti vault-0 -- vault operator unseal -ca-cert /vault/userconfig/vault-server-tls/vault.crt "$(jq -r ".unseal_keys_b64[$i]" "$VAULT_INIT_TEMP_DIR")" done kubectl get pods -l app.kubernetes.io/name=vault - + # Wait for vault to be ready once unsealed while [[ $(kubectl get pods -l app.kubernetes.io/name=vault -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do echo "waiting vault to be ready" && sleep 1; done - + # Configure Vault ROOT_TOKEN=$(jq -r '.root_token' "$VAULT_INIT_TEMP_DIR") kubectl exec -it vault-0 -- vault login -ca-cert /vault/userconfig/vault-server-tls/vault.crt "$ROOT_TOKEN" @@ -151,7 +107,7 @@ function deploy_vault { kubectl exec -ti vault-0 -- vault secrets enable -ca-cert /vault/userconfig/vault-server-tls/vault.crt -path=rook/ver2 kv-v2 kubectl exec -ti vault-0 -- vault kv list -ca-cert /vault/userconfig/vault-server-tls/vault.crt rook/ver1 || true # failure is expected kubectl exec -ti vault-0 -- vault kv list -ca-cert /vault/userconfig/vault-server-tls/vault.crt rook/ver2 || true # failure is expected - + # Configure Vault Policy for Rook echo ' path "rook/*" { @@ -160,10 +116,10 @@ function deploy_vault { path "sys/mounts" { capabilities = ["read"] }'| kubectl exec -i vault-0 -- vault policy write -ca-cert /vault/userconfig/vault-server-tls/vault.crt rook - - + # Create a token for Rook ROOK_TOKEN=$(kubectl exec vault-0 -- vault token create -policy=rook -format json -ca-cert /vault/userconfig/vault-server-tls/vault.crt|jq -r '.auth.client_token'|base64) - + # Configure cluster sed -i "s|ROOK_TOKEN|${ROOK_TOKEN//[$'\t\r\n']}|" tests/manifests/test-kms-vault.yaml } @@ -176,7 +132,7 @@ function validate_rgw_token { RGW_TOKEN_FILE=$(kubectl -n rook-ceph describe pods "$RGW_POD" | grep "rgw-crypt-vault-token-file" | cut -f2- -d=) 
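The waits in `deploy_vault` above (and the cert-manager waits in the admission-controller script further below) all follow the same bounded-polling idiom: wrap an `until` loop in `timeout` so CI fails fast instead of hanging. A minimal, generic version of that pattern; the namespace, label selector, and expected pod count are placeholders:

```bash
#!/usr/bin/env bash
# Generic bounded wait: poll until N pods matching a label are Running, or give up.
wait_for_running_pods() {
  local ns="$1" selector="$2" expected="$3" timeout_secs="${4:-120}"
  timeout "${timeout_secs}" bash -c '
    until [ "$(kubectl -n "$0" get pods -l "$1" \
          --field-selector=status.phase=Running --no-headers | wc -l)" -ge "$2" ]; do
      echo "waiting for $2 Running pod(s) matching $1 in $0"
      sleep 5
    done' "${ns}" "${selector}" "${expected}"
}

# Example: wait for the single vault pod, mirroring the deploy_vault wait above
wait_for_running_pods default app.kubernetes.io/name=vault 1 120
```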
VAULT_PATH_PREFIX=$(kubectl -n rook-ceph describe pods "$RGW_POD" | grep "rgw-crypt-vault-prefix" | cut -f2- -d=) VAULT_TOKEN=$(kubectl -n rook-ceph exec $RGW_POD -- cat $RGW_TOKEN_FILE) - + #fetch key from vault server using token from RGW pod, P.S using -k for curl since custom ssl certs not yet to support in RGW FETCHED_KEY=$(kubectl -n rook-ceph exec $RGW_POD -- curl -k -X GET -H "X-Vault-Token:$VAULT_TOKEN" "$VAULT_SERVER""$VAULT_PATH_PREFIX"/"$RGW_BUCKET_KEY"|jq -r .data.data.key) if [[ "$ENCRYPTION_KEY" != "$FETCHED_KEY" ]]; then @@ -196,7 +152,7 @@ function validate_rgw_deployment { function validate_osd_secret { NB_OSD_PVC=$(kubectl -n rook-ceph get pvc|grep -c set1) NB_VAULT_SECRET=$(kubectl -n default exec -ti vault-0 -- vault kv list -ca-cert /vault/userconfig/vault-server-tls/vault.crt rook/ver1|grep -c set1) - + if [ "$NB_OSD_PVC" -ne "$NB_VAULT_SECRET" ]; then echo "number of osd pvc is $NB_OSD_PVC and number of vault secret is $NB_VAULT_SECRET, mismatch" exit 1 diff --git a/tests/scripts/deploy_admission_controller.sh b/tests/scripts/deploy_admission_controller.sh index c1f4ae644..10970d074 100755 --- a/tests/scripts/deploy_admission_controller.sh +++ b/tests/scripts/deploy_admission_controller.sh @@ -5,10 +5,10 @@ set -eEo pipefail function cleanup() { set +e - kubectl -n rook-ceph delete validatingwebhookconfigurations $WEBHOOK_CONFIG_NAME + kubectl -n rook-ceph delete validatingwebhookconfigurations "$WEBHOOK_CONFIG_NAME" kubectl -n rook-ceph delete certificate rook-admission-controller-cert kubectl -n rook-ceph delete issuers selfsigned-issuer - kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/$CERT_VERSION/cert-manager.yaml + kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/"$CERT_VERSION"/cert-manager.yaml set -e } @@ -17,23 +17,23 @@ function error_log() { kubectl -n rook-ceph get issuer kubectl -n rook-ceph get certificate kubectl -n rook-ceph get secret | grep rook-ceph-admission-controller - kubectl -n rook-ceph get validatingwebhookconfigurations.admissionregistration.k8s.io - kubectl describe validatingwebhookconfigurations.admissionregistration.k8s.io cert-manager-webhook - kubectl describe validatingwebhookconfigurations.admissionregistration.k8s.io rook-ceph-webhook + kubectl -n rook-ceph get validatingwebhookconfigurations.admissionregistration.k8s.io + kubectl describe validatingwebhookconfigurations.admissionregistration.k8s.io cert-manager-webhook + kubectl describe validatingwebhookconfigurations.admissionregistration.k8s.io rook-ceph-webhook kubectl -n cert-manager logs deploy/cert-manager-webhook --tail=10 kubectl -n cert-manager logs deploy/cert-manager-cainjector --tail=10 set -e +x cleanup } -trap cleanup SIGINT +trap cleanup SIGINT trap error_log ERR # Minimum 1.16.0 kubernetes version is required to start the admission controller SERVER_VERSION=$(kubectl version --short | awk -F "." 
'/Server Version/ {print $2}') MINIMUM_VERSION=16 -if [ ${SERVER_VERSION} -lt ${MINIMUM_VERSION} ]; then +if [ "${SERVER_VERSION}" -lt ${MINIMUM_VERSION} ]; then echo "required minimum kubernetes version 1.$MINIMUM_VERSION.0" exit fi @@ -51,10 +51,33 @@ echo "$BASE_DIR" echo "Deploying cert-manager" kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/$CERT_VERSION/cert-manager.yaml -timeout 150 sh -c 'until [ $(kubectl -n cert-manager get pods --field-selector=status.phase=Running|grep -c ^cert-) -eq 3 ]; do sleep 1 && echo "waiting for cert-manager pods to be in running state"; done' -timeout 20 sh -c 'until [ $(kubectl -n cert-manager get pods -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 3 ]; do sleep 1 && echo "waiting for the pods to be in ready state"; done' -timeout 25 sh -c 'until [ $(kubectl get validatingwebhookconfigurations cert-manager-webhook -o jsonpath='{.webhooks[*].clientConfig.caBundle}' | wc -c) -gt 1 ]; do sleep 1 && echo "waiting for caInjector to inject in caBundle for cert-manager validating webhook"; done' -timeout 25 sh -c 'until [ $(kubectl get mutatingwebhookconfigurations cert-manager-webhook -o jsonpath='{.webhooks[*].clientConfig.caBundle}' | wc -c) -gt 1 ]; do sleep 1 && echo "waiting for caInjector to inject in caBundle for cert-managers mutating webhook"; done' +timeout 150 bash <<-'EOF' + until [ $(kubectl -n cert-manager get pods --field-selector=status.phase=Running | grep -c ^cert-) -eq 3 ]; do + echo "waiting for cert-manager pods to be in running state" + sleep 1 + done +EOF + +timeout 20 bash <<-'EOF' + until [ $(kubectl -n cert-manager get pods -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 3 ]; do + echo "waiting for the pods to be in ready state" + sleep 1 + done +EOF + +timeout 25 bash <<-'EOF' + until [ $(kubectl get validatingwebhookconfigurations cert-manager-webhook -o jsonpath='{.webhooks[*].clientConfig.caBundle}' | wc -c) -gt 1 ]; do + echo "waiting for caInjector to inject in caBundle for cert-manager validating webhook" + sleep 1 + done +EOF + +timeout 25 bash <<-'EOF' + until [ $(kubectl get mutatingwebhookconfigurations cert-manager-webhook -o jsonpath='{.webhooks[*].clientConfig.caBundle}' | wc -c) -gt 1 ]; do + echo "waiting for caInjector to inject in caBundle for cert-managers mutating webhook" + sleep 1 + done +EOF echo "Successfully deployed cert-manager" @@ -87,7 +110,7 @@ EOF echo "Successfully created Issuer and Certificate" echo "Deploying webhook config" -cat ${BASE_DIR}/webhook-config.yaml | \ +< "${BASE_DIR}"/webhook-config.yaml \ "${BASE_DIR}"/webhook-patch-ca-bundle.sh | \ sed -e "s|\${NAMESPACE}|${NAMESPACE}|g" | \ sed -e "s|\${WEBHOOK_CONFIG_NAME}|${WEBHOOK_CONFIG_NAME}|g" | \ diff --git a/tests/scripts/deploy_admission_controller_test.sh b/tests/scripts/deploy_admission_controller_test.sh index 3ba1d0bb1..cd7145c1c 100755 --- a/tests/scripts/deploy_admission_controller_test.sh +++ b/tests/scripts/deploy_admission_controller_test.sh @@ -1,13 +1,13 @@ #!/usr/bin/env bash -BASE_DIR=$(cd "$(dirname "$0")"; pwd) +BASE_DIR=$(cd "$(dirname "$0")" && pwd) if [ -z "$KUBECONFIG" ];then if [ -f "$HOME/.kube/config" ]; then export KUBECONFIG="$HOME/.kube/config" else - sudo cp /etc/kubernetes/admin.conf $HOME/ - sudo chown $(id -u):$(id -g) $HOME/admin.conf + sudo cp /etc/kubernetes/admin.conf "$HOME"/ + sudo chown "$(id -u)":"$(id -g)" "$HOME"/admin.conf export KUBECONFIG=$HOME/admin.conf fi fi @@ -15,5 +15,5 @@ fi if [ -f 
"${BASE_DIR}"/deploy_admission_controller.sh ];then bash "${BASE_DIR}"/deploy_admission_controller.sh else - echo ""${BASE_DIR}"/deploy_admission_controller.sh not found !" -fi \ No newline at end of file + echo "${BASE_DIR}/deploy_admission_controller.sh not found !" +fi diff --git a/tests/scripts/dind-cluster-rbd b/tests/scripts/dind-cluster-rbd deleted file mode 100644 index e0d3b0ab6..000000000 --- a/tests/scripts/dind-cluster-rbd +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -DOCKER_HOST=unix:///opt/outer-docker.sock /usr/bin/docker run --rm -v /sys:/sys --net=host --privileged=true ceph/base rbd "$@" diff --git a/tests/scripts/dind-cluster.sh b/tests/scripts/dind-cluster.sh deleted file mode 100755 index e6eb0080e..000000000 --- a/tests/scripts/dind-cluster.sh +++ /dev/null @@ -1,990 +0,0 @@ -#!/bin/bash -# Copyright 2017 Mirantis -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail -set -o errtrace - -if [ $(uname) = Darwin ]; then - readlinkf(){ perl -MCwd -e 'print Cwd::abs_path shift' "$1";} -else - readlinkf(){ readlink -f "$1"; } -fi -DIND_ROOT="$(cd $(dirname "$(readlinkf "${BASH_SOURCE}")"); pwd)" - -RUN_ON_BTRFS_ANYWAY="${RUN_ON_BTRFS_ANYWAY:-}" -if [[ ! ${RUN_ON_BTRFS_ANYWAY} ]] && docker info| grep -q '^Storage Driver: btrfs'; then - echo "ERROR: Docker is using btrfs storage driver which is unsupported by kubeadm-dind-cluster" >&2 - echo "Please refer to the documentation for more info." >&2 - echo "Set RUN_ON_BTRFS_ANYWAY to non-empty string to continue anyway." >&2 - exit 1 -fi - -# In case of moby linux, -v will not work so we can't -# mount /lib/modules and /boot -is_moby_linux= -if docker info|grep -s '^Kernel Version: .*-moby$' > /dev/null 2>&1; then - is_moby_linux=1 -fi - -#%CONFIG% - -if [[ ! ${EMBEDDED_CONFIG:-} ]]; then - source "${DIND_ROOT}/config.sh" -fi - -CNI_PLUGIN="${CNI_PLUGIN:-bridge}" -DIND_SUBNET="${DIND_SUBNET:-10.192.0.0}" -dind_ip_base="$(echo "${DIND_SUBNET}" | sed 's/\.0$//')" -DIND_IMAGE="${DIND_IMAGE:-}" -BUILD_KUBEADM="${BUILD_KUBEADM:-}" -BUILD_HYPERKUBE="${BUILD_HYPERKUBE:-}" -APISERVER_PORT=${APISERVER_PORT:-8080} -NUM_NODES=${NUM_NODES:-2} -LOCAL_KUBECTL_VERSION=${LOCAL_KUBECTL_VERSION:-} -KUBECTL_DIR="${KUBECTL_DIR:-${HOME}/.kubeadm-dind-cluster}" -DASHBOARD_URL="${DASHBOARD_URL:-https://rawgit.com/kubernetes/dashboard/bfab10151f012d1acc5dfb1979f3172e2400aa3c/src/deploy/kubernetes-dashboard.yaml}" -SKIP_SNAPSHOT="${SKIP_SNAPSHOT:-}" -E2E_REPORT_DIR="${E2E_REPORT_DIR:-}" - -if [[ ! ${LOCAL_KUBECTL_VERSION:-} && ${DIND_IMAGE:-} =~ :(v[0-9]+\.[0-9]+)$ ]]; then - LOCAL_KUBECTL_VERSION="${BASH_REMATCH[1]}" -fi - -function dind::need-source { - if [[ ! -f cluster/kubectl.sh ]]; then - echo "$0 must be called from the Kubernetes repository root directory" 1>&2 - exit 1 - fi -} - -build_tools_dir="build" -use_k8s_source=y -if [[ ! ${BUILD_KUBEADM} && ! ${BUILD_HYPERKUBE} ]]; then - use_k8s_source= -fi -if [[ ${use_k8s_source} ]]; then - dind::need-source - kubectl=cluster/kubectl.sh - if [[ ! 
-f ${build_tools_dir}/common.sh ]]; then - build_tools_dir="build-tools" - fi -else - if [[ ! ${LOCAL_KUBECTL_VERSION:-} ]] && ! hash kubectl 2>/dev/null; then - echo "You need kubectl binary in your PATH to use prebuilt DIND image" 1>&2 - exit 1 - fi - kubectl=kubectl -fi - -busybox_image="busybox:1.26.2" -e2e_base_image="golang:1.7.1" -sys_volume_args=() -build_volume_args=() - -function dind::set-build-volume-args { - if [ ${#build_volume_args[@]} -gt 0 ]; then - return 0 - fi - build_container_name= - if [ -n "${KUBEADM_DIND_LOCAL:-}" ]; then - build_volume_args=(-v "$PWD:/go/src/k8s.io/kubernetes") - else - build_container_name="$(KUBE_ROOT=$PWD && - . ${build_tools_dir}/common.sh && - kube::build::verify_prereqs >&2 && - echo "${KUBE_DATA_CONTAINER_NAME:-${KUBE_BUILD_DATA_CONTAINER_NAME}}")" - build_volume_args=(--volumes-from "${build_container_name}") - fi -} - -function dind::volume-exists { - local name="$1" - if docker volume inspect "${name}" >& /dev/null; then - return 0 - fi - return 1 -} - -function dind::create-volume { - local name="$1" - docker volume create --label mirantis.kubeadm_dind_cluster --name "${name}" >/dev/null -} - -# We mount /boot and /lib/modules into the container -# below to in case some of the workloads need them. -# This includes virtlet, for instance. Also this may be -# useful in future if we want DIND nodes to pass -# preflight checks. -function dind::prepare-sys-mounts { - if [[ ! ${is_moby_linux} ]]; then - sys_volume_args=() - if [[ -d /boot ]]; then - sys_volume_args+=(-v /boot:/boot) - fi - if [[ -d /lib/modules ]]; then - sys_volume_args+=(-v /lib/modules:/lib/modules) - fi - return 0 - fi - if ! dind::volume-exists kubeadm-dind-sys; then - dind::step "Saving a copy of docker host's /lib/modules" - dind::create-volume kubeadm-dind-sys - # /lib/modules into sys.tar file on kubeadm-dind-sys volume. - local nsenter="nsenter --mount=/proc/1/ns/mnt --" - docker run \ - --rm \ - --privileged \ - -v kubeadm-dind-sys:/dest \ - --pid=host \ - "${busybox_image}" \ - /bin/sh -c \ - "if ${nsenter} test -d /lib/modules; then ${nsenter} tar -C / -c lib/modules >/dest/sys.tar; fi" - fi - sys_volume_args=(-v kubeadm-dind-sys:/dind-sys) -} - -tmp_containers=() - -function dind::cleanup { - if [ ${#tmp_containers[@]} -gt 0 ]; then - for name in "${tmp_containers[@]}"; do - docker rm -vf "${name}" 2>/dev/null - done - fi -} - -trap dind::cleanup EXIT - -function dind::check-image { - local name="$1" - if docker inspect --format 'x' "${name}" >&/dev/null; then - return 0 - else - return 1 - fi -} - -function dind::filter-make-output { - # these messages make output too long and make Travis CI choke - egrep -v --line-buffered 'I[0-9][0-9][0-9][0-9] .*(parse|conversion|defaulter|deepcopy)\.go:[0-9]+\]' -} - -function dind::run-build-command { - # this is like build/run.sh, but it doesn't rsync back the binaries, - # only the generated files. - local cmd=("$@") - ( - # The following is taken from build/run.sh and build/common.sh - # of Kubernetes source tree. It differs in - # --filter='+ /_output/dockerized/bin/**' - # being removed from rsync - . ${build_tools_dir}/common.sh - kube::build::verify_prereqs - kube::build::build_image - kube::build::run_build_command "$@" - - kube::log::status "Syncing out of container" - - kube::build::start_rsyncd_container - - local rsync_extra="" - if (( ${KUBE_VERBOSE} >= 6 )); then - rsync_extra="-iv" - fi - - # The filter syntax for rsync is a little obscure. It filters on files and - # directories. 
If you don't go in to a directory you won't find any files - # there. Rules are evaluated in order. The last two rules are a little - # magic. '+ */' says to go in to every directory and '- /**' says to ignore - # any file or directory that isn't already specifically allowed. - # - # We are looking to copy out all of the built binaries along with various - # generated files. - kube::build::rsync \ - --filter='- /_temp/' \ - --filter='+ zz_generated.*' \ - --filter='+ generated.proto' \ - --filter='+ *.pb.go' \ - --filter='+ types.go' \ - --filter='+ */' \ - --filter='- /**' \ - "rsync://k8s@${KUBE_RSYNC_ADDR}/k8s/" "${KUBE_ROOT}" - - kube::build::stop_rsyncd_container - ) -} - -function dind::make-for-linux { - local copy="$1" - shift - dind::step "Building binaries:" "$*" - if [ -n "${KUBEADM_DIND_LOCAL:-}" ]; then - dind::step "+ make WHAT=\"$*\"" - make WHAT="$*" 2>&1 | dind::filter-make-output - elif [ "${copy}" = "y" ]; then - dind::step "+ ${build_tools_dir}/run.sh make WHAT=\"$*\"" - "${build_tools_dir}/run.sh" make WHAT="$*" 2>&1 | dind::filter-make-output - else - dind::step "+ [using the build container] make WHAT=\"$*\"" - dind::run-build-command make WHAT="$*" 2>&1 | dind::filter-make-output - fi -} - -function dind::check-binary { - local filename="$1" - local dockerized="_output/dockerized/bin/linux/amd64/${filename}" - local plain="_output/local/bin/linux/amd64/${filename}" - dind::set-build-volume-args - # FIXME: don't hardcode amd64 arch - if [ -n "${KUBEADM_DIND_LOCAL:-${force_local:-}}" ]; then - if [ -f "${dockerized}" -o -f "${plain}" ]; then - return 0 - fi - elif docker run --rm "${build_volume_args[@]}" \ - "${busybox_image}" \ - test -f "/go/src/k8s.io/kubernetes/${dockerized}" >&/dev/null; then - return 0 - fi - return 1 -} - -function dind::ensure-downloaded-kubectl { - local kubectl_url - local kubectl_sha1 - local kubectl_sha1_linux - local kubectl_sha1_darwin - local kubectl_link - local kubectl_os - local full_kubectl_version - - case "${LOCAL_KUBECTL_VERSION}" in - v1.5) - full_kubectl_version=v1.5.4 - kubectl_sha1_linux=15d8430dc52b1f3772b88bc6a236c8fa58e07c0d - kubectl_sha1_darwin=5e671ba792567574eea48be4eddd844ba2f07c27 - ;; - v1.6) - full_kubectl_version=v1.6.6 - kubectl_sha1_linux=41153558717f3206d37f5bf34232a303ae4dade1 - kubectl_sha1_darwin=9795098e7340764b96a83e50676886d29e792033 - ;; - v1.7) - full_kubectl_version=v1.7.0 - kubectl_sha1_linux=c92ec52c02ec10a1ab54132d3cc99ad6f68c530e - kubectl_sha1_darwin=2e2708b873accafb1be8f328008e3d41a6a32c08 - ;; - "") - return 0 - ;; - *) - echo "Invalid kubectl version" >&2 - exit 1 - esac - - export PATH="${KUBECTL_DIR}:$PATH" - - if [ $(uname) = Darwin ]; then - kubectl_sha1="${kubectl_sha1_darwin}" - kubectl_os=darwin - else - kubectl_sha1="${kubectl_sha1_linux}" - kubectl_os=linux - fi - local link_target="kubectl-${full_kubectl_version}" - local link_name="${KUBECTL_DIR}"/kubectl - if [[ -h "${link_name}" && "$(readlink "${link_name}")" = "${link_target}" ]]; then - return 0 - fi - - local path="${KUBECTL_DIR}/${link_target}" - if [[ ! -f "${path}" ]]; then - mkdir -p "${KUBECTL_DIR}" - curl -sSLo "${path}" "https://storage.googleapis.com/kubernetes-release/release/${full_kubectl_version}/bin/${kubectl_os}/amd64/kubectl" - echo "${kubectl_sha1} ${path}" | sha1sum -c - chmod +x "${path}" - fi - - ln -fs "${link_target}" "${KUBECTL_DIR}/kubectl" -} - -function dind::ensure-kubectl { - if [[ ! 
${use_k8s_source} ]]; then - # already checked on startup - dind::ensure-downloaded-kubectl - return 0 - fi - if [ $(uname) = Darwin ]; then - if [ ! -f _output/local/bin/darwin/amd64/kubectl ]; then - dind::step "Building kubectl" - dind::step "+ make WHAT=cmd/kubectl" - make WHAT=cmd/kubectl 2>&1 | dind::filter-make-output - fi - elif ! force_local=y dind::check-binary kubectl; then - dind::make-for-linux y cmd/kubectl - fi -} - -function dind::ensure-binaries { - local -a to_build=() - for name in "$@"; do - if ! dind::check-binary "$(basename "${name}")"; then - to_build+=("${name}") - fi - done - if [ "${#to_build[@]}" -gt 0 ]; then - dind::make-for-linux n "${to_build[@]}" - fi - return 0 -} - -function dind::ensure-network { - if ! docker network inspect kubeadm-dind-net >&/dev/null; then - docker network create --subnet="${DIND_SUBNET}/16" kubeadm-dind-net >/dev/null - fi -} - -function dind::ensure-volume { - local reuse_volume= - if [[ $1 = -r ]]; then - reuse_volume=1 - shift - fi - local name="$1" - if dind::volume-exists "${name}"; then - if [[ ! {reuse_volume} ]]; then - docker volume rm "${name}" >/dev/null - fi - elif [[ ${reuse_volume} ]]; then - echo "*** Failed to locate volume: ${name}" 1>&2 - return 1 - fi - dind::create-volume "${name}" -} - -function dind::run { - local reuse_volume= - if [[ $1 = -r ]]; then - reuse_volume="-r" - shift - fi - local container_name="${1:-}" - local ip="${2:-}" - local netshift="${3:-}" - local portforward="${4:-}" - if [[ $# -gt 4 ]]; then - shift 4 - else - shift $# - fi - local -a opts=(--ip "${ip}" "$@") - local -a args=("systemd.setenv=CNI_PLUGIN=${CNI_PLUGIN}") - - if [[ ! "${container_name}" ]]; then - echo >&2 "Must specify container name" - exit 1 - fi - - # remove any previously created containers with the same name - docker rm -vf "${container_name}" >&/dev/null || true - - if [[ "$portforward" ]]; then - opts+=(-p "$portforward") - fi - - if [[ ${CNI_PLUGIN} = bridge && ${netshift} ]]; then - args+=("systemd.setenv=CNI_BRIDGE_NETWORK_OFFSET=0.0.${netshift}.0") - fi - - opts+=(${sys_volume_args[@]+"${sys_volume_args[@]}"}) - - dind::step "Starting DIND container:" "${container_name}" - - if [[ ! ${is_moby_linux} ]]; then - opts+=(-v /boot:/boot -v /lib/modules:/lib/modules) - fi - - volume_name="kubeadm-dind-${container_name}" - dind::ensure-network - dind::ensure-volume ${reuse_volume} "${volume_name}" - - # TODO: create named volume for binaries and mount it to /k8s - # in case of the source build - - # Start the new container. - docker run \ - -d --privileged \ - --net kubeadm-dind-net \ - --name "${container_name}" \ - --hostname "${container_name}" \ - -l mirantis.kubeadm_dind_cluster \ - -v ${volume_name}:/dind \ - -v /lib/modules:/lib/modules \ - -v /sbin/modprobe:/sbin/modprobe \ - -v /dev:/dev \ - -v /sys/bus:/sys/bus \ - -v /var/run/docker.sock:/opt/outer-docker.sock \ - ${opts[@]+"${opts[@]}"} \ - "${DIND_IMAGE}" \ - ${args[@]+"${args[@]}"} -} - -function dind::kubeadm { - local container_id="$1" - shift - dind::step "Running kubeadm:" "$*" - status=0 - # See image/bare/wrapkubeadm. - # Capturing output is necessary to grab flags for 'kubeadm join' - if ! docker exec "${container_id}" wrapkubeadm "$@" 2>&1 | tee /dev/fd/2; then - echo "*** kubeadm failed" >&2 - return 1 - fi - return ${status} -} - -# function dind::bare { -# local container_name="${1:-}" -# if [[ ! 
"${container_name}" ]]; then -# echo >&2 "Must specify container name" -# exit 1 -# fi -# shift -# run_opts=(${@+"$@"}) -# dind::run "${container_name}" -# } - -function dind::configure-kubectl { - dind::step "Setting cluster config" - "${kubectl}" config set-cluster dind --server="http://localhost:${APISERVER_PORT}" --insecure-skip-tls-verify=true - "${kubectl}" config set-context dind --cluster=dind - "${kubectl}" config use-context dind -} - -force_make_binaries= -function dind::set-master-opts { - master_opts=() - if [[ ${BUILD_KUBEADM} || ${BUILD_HYPERKUBE} ]]; then - # share binaries pulled from the build container between nodes - dind::ensure-volume "dind-k8s-binaries" - dind::set-build-volume-args - master_opts+=("${build_volume_args[@]}" -v dind-k8s-binaries:/k8s) - local -a bins - if [[ ${BUILD_KUBEADM} ]]; then - master_opts+=(-e KUBEADM_SOURCE=build://) - bins+=(cmd/kubeadm) - fi - if [[ ${BUILD_HYPERKUBE} ]]; then - master_opts+=(-e HYPERKUBE_SOURCE=build://) - bins+=(cmd/hyperkube) - fi - if [[ ${force_make_binaries} ]]; then - dind::make-for-linux n "${bins[@]}" - else - dind::ensure-binaries "${bins[@]}" - fi - fi -} - -cached_use_rbac= -function dind::use-rbac { - # we use rbac in case of k8s 1.6 - if [[ ${cached_use_rbac} ]]; then - [[ ${cached_use_rbac} = 1 ]] && return 0 || return 1 - fi - cached_use_rbac=0 - if "${kubectl}" version --short >& /dev/null && ! "${kubectl}" version --short | grep -q 'Server Version: v1\.5\.'; then - cached_use_rbac=1 - return 0 - fi - return 1 -} - -function dind::deploy-dashboard { - dind::step "Deploying k8s dashboard" - "${kubectl}" create -f "${DASHBOARD_URL}" - if dind::use-rbac; then - # https://kubernetes-io-vnext-staging.netlify.com/docs/admin/authorization/rbac/#service-account-permissions - # Thanks @liggitt for the hint - "${kubectl}" create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default - fi -} - -function dind::init { - local -a opts - dind::set-master-opts - local master_ip="${dind_ip_base}.2" - local container_id=$(dind::run kube-master "${master_ip}" 1 127.0.0.1:${APISERVER_PORT}:8080 ${master_opts[@]+"${master_opts[@]}"}) - # FIXME: I tried using custom tokens with 'kubeadm ex token create' but join failed with: - # 'failed to parse response as JWS object [square/go-jose: compact JWS format must have three parts]' - # So we just pick the line from 'kubeadm init' output - local kube_version_flag="" - if [[ ${BUILD_KUBEADM} ]]; then - # FIXME: this is temporary fix for kubeadm trying to get a non-existent release URL. 
- # It doesn't change the fact that we're deploying custom-built k8s version - kube_version_flag="--kubernetes-version=stable-1.6" - fi - kubeadm_join_flags="$(dind::kubeadm "${container_id}" init --pod-network-cidr=10.244.0.0/16 --skip-preflight-checks ${kube_version_flag} "$@" | grep '^ *kubeadm join' | sed 's/^ *kubeadm join //')" - dind::configure-kubectl - dind::deploy-dashboard -} - -function dind::create-node-container { - local reuse_volume= - if [[ $1 = -r ]]; then - reuse_volume="-r" - shift - fi - # if there's just one node currently, it's master, thus we need to use - # kube-node-1 hostname, if there are two nodes, we should pick - # kube-node-2 and so on - local next_node_index=${1:-$(docker ps -q --filter=label=mirantis.kubeadm_dind_cluster | wc -l | sed 's/^ *//g')} - local node_ip="${dind_ip_base}.$((next_node_index + 2))" - local -a opts - if [[ ${BUILD_KUBEADM} || ${BUILD_HYPERKUBE} ]]; then - opts+=(-v dind-k8s-binaries:/k8s) - if [[ ${BUILD_KUBEADM} ]]; then - opts+=(-e KUBEADM_SOURCE=build://) - fi - if [[ ${BUILD_HYPERKUBE} ]]; then - opts+=(-e HYPERKUBE_SOURCE=build://) - fi - fi - dind::run ${reuse_volume} kube-node-${next_node_index} ${node_ip} $((next_node_index + 1)) "" ${opts[@]+"${opts[@]}"} -} - -function dind::join { - local container_id="$1" - shift - dind::kubeadm "${container_id}" join --skip-preflight-checks "$@" >/dev/null -} - -function dind::escape-e2e-name { - sed 's/[]\$*.^|()[]/\\&/g; s/\s\+/\\s+/g' <<< "$1" | tr -d '\n' -} - -function dind::accelerate-kube-dns { - dind::step "Patching kube-dns deployment to make it start faster" - # Could do this on the host, too, but we don't want to require jq here - # TODO: do this in wrapkubeadm - # 'kubectl version --short' is a quick check for kubectl 1.4 - # which doesn't support 'kubectl apply --force' - docker exec kube-master /bin/bash -c \ - "kubectl get deployment kube-dns -n kube-system -o json | jq '.spec.template.spec.containers[0].readinessProbe.initialDelaySeconds = 3|.spec.template.spec.containers[0].readinessProbe.periodSeconds = 3' | if kubectl version --short >&/dev/null; then kubectl apply --force -f -; else kubectl apply -f -; fi" -} - -function dind::component-ready { - local label="$1" - local out - if ! out="$("${kubectl}" get pod -l "${label}" -n kube-system \ - -o jsonpath='{ .items[*].status.conditions[?(@.type == "Ready")].status }' 2>/dev/null)"; then - return 1 - fi - if ! grep -v False <<<"${out}" | grep -q True; then - return 1 - fi - return 0 -} - -function dind::kill-failed-pods { - local pods - # workaround for https://github.com/kubernetes/kubernetes/issues/36482 - if ! pods="$(kubectl get pod -n kube-system -o jsonpath='{ .items[?(@.status.phase == "Failed")].metadata.name }' 2>/dev/null)"; then - return - fi - for name in ${pods}; do - kubectl delete pod --now -n kube-system "${name}" >&/dev/null || true - done -} - -function dind::wait-for-ready { - dind::step "Waiting for kube-proxy and the nodes" - local proxy_ready - local nodes_ready - local n=3 - while true; do - dind::kill-failed-pods - if "${kubectl}" get nodes 2>/dev/null| grep -q NotReady; then - nodes_ready= - else - nodes_ready=y - fi - if dind::component-ready k8s-app=kube-proxy; then - proxy_ready=y - else - proxy_ready= - fi - if [[ ${nodes_ready} && ${proxy_ready} ]]; then - if ((--n == 0)); then - echo "[done]" >&2 - break - fi - else - n=3 - fi - echo -n "." 
>&2 - sleep 1 - done - - dind::step "Bringing up kube-dns and kubernetes-dashboard" - "${kubectl}" scale deployment --replicas=1 -n kube-system kube-dns - "${kubectl}" scale deployment --replicas=1 -n kube-system kubernetes-dashboard - - while ! dind::component-ready k8s-app=kube-dns || ! dind::component-ready app=kubernetes-dashboard; do - echo -n "." >&2 - dind::kill-failed-pods - sleep 1 - done - echo "[done]" >&2 - - "${kubectl}" get nodes >&2 - dind::step "Access dashboard at:" "http://localhost:${APISERVER_PORT}/ui" -} - -function dind::up { - dind::down - dind::init - local master_ip="$(docker inspect --format="{{.NetworkSettings.IPAddress}}" kube-master)" - # pre-create node containers sequentially so they get predictable IPs - local -a node_containers - for ((n=1; n <= NUM_NODES; n++)); do - dind::step "Starting node container:" ${n} - if ! container_id="$(dind::create-node-container ${n})"; then - echo >&2 "*** Failed to start node container ${n}" - exit 1 - else - node_containers+=(${container_id}) - dind::step "Node container started:" ${n} - fi - done - status=0 - local -a pids - for ((n=1; n <= NUM_NODES; n++)); do - ( - dind::step "Joining node:" ${n} - container_id="${node_containers[n-1]}" - if ! dind::join ${container_id} ${kubeadm_join_flags}; then - echo >&2 "*** Failed to start node container ${n}" - exit 1 - else - dind::step "Node joined:" ${n} - fi - )& - pids[${n}]=$! - done - if ((NUM_NODES > 0)); then - for pid in ${pids[*]}; do - wait ${pid} - done - else - # FIXME: this may fail depending on k8s/kubeadm version - "${kubectl}" taint nodes kube-master node-role.kubernetes.io/master- || true - fi - case "${CNI_PLUGIN}" in - bridge) - ;; - flannel) - if dind::use-rbac; then - curl -sSL "https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml?raw=true" | "${kubectl}" create -f - - fi - # without --validate=false this will fail on older k8s versions - curl -sSL "https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml?raw=true" | "${kubectl}" create --validate=false -f - - ;; - calico) - if dind::use-rbac; then - "${kubectl}" apply -f http://docs.projectcalico.org/v2.1/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml - else - "${kubectl}" apply -f http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/calico.yaml - fi - ;; - weave) - if dind::use-rbac; then - "${kubectl}" apply -f "https://github.com/weaveworks/weave/blob/master/prog/weave-kube/weave-daemonset-k8s-1.6.yaml?raw=true" - else - "${kubectl}" apply -f https://git.io/weave-kube - fi - ;; - *) - echo "Unsupported CNI plugin '${CNI_PLUGIN}'" >&2 - ;; - esac - dind::accelerate-kube-dns - if [[ ${CNI_PLUGIN} != bridge ]]; then - # This is especially important in case of Calico - - # the cluster will not recover after snapshotting - # (at least not after restarting from the snapshot) - # if Calico installation is interrupted - dind::wait-for-ready - fi -} - -function dind::snapshot_container { - local container_name="$1" - docker exec -i ${container_name} /usr/local/bin/snapshot prepare - docker diff ${container_name} | docker exec -i ${container_name} /usr/local/bin/snapshot save -} - -function dind::snapshot { - dind::step "Taking snapshot of the cluster" - dind::snapshot_container kube-master - for ((n=1; n <= NUM_NODES; n++)); do - dind::snapshot_container "kube-node-${n}" - done - dind::wait-for-ready -} - -restore_cmd=restore -function dind::restore_container { - local container_id="$1" - docker exec 
${container_id} /usr/local/bin/snapshot "${restore_cmd}" -} - -function dind::restore { - local master_ip="${dind_ip_base}.2" - dind::down - dind::step "Restoring master container" - dind::set-master-opts - for ((n=0; n <= NUM_NODES; n++)); do - ( - if [[ n -eq 0 ]]; then - dind::step "Restoring master container" - dind::restore_container "$(dind::run -r kube-master "${master_ip}" 1 127.0.0.1:${APISERVER_PORT}:8080 ${master_opts[@]+"${master_opts[@]}"})" - dind::step "Master container restored" - else - dind::step "Restoring node container:" ${n} - if ! container_id="$(dind::create-node-container -r ${n})"; then - echo >&2 "*** Failed to start node container ${n}" - exit 1 - else - dind::restore_container "${container_id}" - dind::step "Node container restored:" ${n} - fi - fi - )& - pids[${n}]=$! - done - for pid in ${pids[*]}; do - wait ${pid} - done - # Recheck kubectl config. It's possible that the cluster was started - # on this docker from different host - dind::configure-kubectl - dind::wait-for-ready -} - -function dind::down { - docker ps -a -q --filter=label=mirantis.kubeadm_dind_cluster | while read container_id; do - dind::step "Removing container:" "${container_id}" - docker rm -fv "${container_id}" - done -} - -function dind::remove-volumes { - # docker 1.13+: docker volume ls -q -f label=mirantis.kubeadm_dind_cluster - docker volume ls -q | (grep '^kubeadm-dind' || true) | while read volume_id; do - dind::step "Removing volume:" "${volume_id}" - docker volume rm "${volume_id}" - done -} - -function dind::check-for-snapshot { - if ! dind::volume-exists "kubeadm-dind-kube-master"; then - return 1 - fi - for ((n=1; n <= NUM_NODES; n++)); do - if ! dind::volume-exists "kubeadm-dind-kube-node-${n}"; then - return 1 - fi - done -} - -function dind::do-run-e2e { - local parallel="${1:-}" - local focus="${2:-}" - local skip="${3:-}" - dind::need-source - local test_args="--host=http://localhost:${APISERVER_PORT}" - local -a e2e_volume_opts=() - local term= - if [[ ${focus} ]]; then - test_args="--ginkgo.focus=${focus} ${test_args}" - fi - if [[ ${skip} ]]; then - test_args="--ginkgo.skip=${skip} ${test_args}" - fi - if [[ ${E2E_REPORT_DIR} ]]; then - test_args="--report-dir=/report ${test_args}" - e2e_volume_opts=(-v "${E2E_REPORT_DIR}:/report") - fi - dind::make-for-linux n cmd/kubectl test/e2e/e2e.test - dind::step "Running e2e tests with args:" "${test_args}" - dind::set-build-volume-args - if [ -t 1 ] ; then - term="-it" - test_args="--ginkgo.noColor ${test_args}" - fi - docker run \ - --rm ${term} \ - --net=host \ - "${build_volume_args[@]}" \ - -e KUBERNETES_PROVIDER=dind \ - -e KUBE_MASTER_IP=http://localhost:${APISERVER_PORT} \ - -e KUBE_MASTER=local \ - -e KUBERNETES_CONFORMANCE_TEST=y \ - -e GINKGO_PARALLEL=${parallel} \ - ${e2e_volume_opts[@]+"${e2e_volume_opts[@]}"} \ - -w /go/src/k8s.io/kubernetes \ - "${e2e_base_image}" \ - bash -c "cluster/kubectl.sh config set-cluster dind --server='http://localhost:${APISERVER_PORT}' --insecure-skip-tls-verify=true && - cluster/kubectl.sh config set-context dind --cluster=dind && - cluster/kubectl.sh config use-context dind && - go run hack/e2e.go --v --test -check_version_skew=false --test_args='${test_args}'" -} - -function dind::clean { - dind::down - # dind::remove-images - dind::remove-volumes - if docker network inspect kubeadm-dind-net >&/dev/null; then - docker network rm kubeadm-dind-net - fi -} - -function dind::run-e2e { - local focus="${1:-}" - local skip="${2:-\[Serial\]}" - if [[ "$focus" ]]; then - 
focus="$(dind::escape-e2e-name "${focus}")" - else - focus="\[Conformance\]" - fi - dind::do-run-e2e y "${focus}" "${skip}" -} - -function dind::run-e2e-serial { - local focus="${1:-}" - local skip="${2:-}" - dind::need-source - if [[ "$focus" ]]; then - focus="$(dind::escape-e2e-name "${focus}")" - else - focus="\[Serial\].*\[Conformance\]" - fi - dind::do-run-e2e n "${focus}" "${skip}" - # TBD: specify filter -} - -function dind::step { - local OPTS="" - if [ "$1" = "-n" ]; then - shift - OPTS+="-n" - fi - GREEN="$1" - shift - if [ -t 2 ] ; then - echo -e ${OPTS} "\x1B[97m* \x1B[92m${GREEN}\x1B[39m $*" 1>&2 - else - echo ${OPTS} "* ${GREEN} $*" 1>&2 - fi -} - -case "${1:-}" in - up) - if [[ ! ( ${DIND_IMAGE} =~ local ) ]]; then - dind::step "Making sure DIND image is up to date" - docker pull "${DIND_IMAGE}" >&2 - fi - - dind::prepare-sys-mounts - dind::ensure-kubectl - if [[ ${SKIP_SNAPSHOT} ]]; then - force_make_binaries=y dind::up - dind::wait-for-ready - elif ! dind::check-for-snapshot; then - force_make_binaries=y dind::up - dind::snapshot - else - dind::restore - fi - ;; - reup) - dind::prepare-sys-mounts - dind::ensure-kubectl - if [[ ${SKIP_SNAPSHOT} ]]; then - force_make_binaries=y dind::up - dind::wait-for-ready - elif ! dind::check-for-snapshot; then - force_make_binaries=y dind::up - dind::snapshot - else - force_make_binaries=y - restore_cmd=update_and_restore - dind::restore - fi - ;; - down) - dind::down - ;; - init) - shift - dind::prepare-sys-mounts - dind::ensure-kubectl - dind::init "$@" - ;; - join) - shift - dind::prepare-sys-mounts - dind::ensure-kubectl - dind::join "$(dind::create-node-container)" "$@" - ;; - # bare) - # shift - # dind::bare "$@" - # ;; - snapshot) - shift - dind::snapshot - ;; - restore) - shift - dind::restore - ;; - clean) - dind::clean - ;; - e2e) - shift - dind::run-e2e "$@" - ;; - e2e-serial) - shift - dind::run-e2e-serial "$@" - ;; - *) - echo "usage:" >&2 - echo " $0 up" >&2 - echo " $0 reup" >&2 - echo " $0 down" >&2 - echo " $0 init kubeadm-args..." >&2 - echo " $0 join kubeadm-args..." >&2 - # echo " $0 bare container_name [docker_options...]" - echo " $0 clean" - echo " $0 e2e [test-name-substring]" >&2 - echo " $0 e2e-serial [test-name-substring]" >&2 - exit 1 - ;; -esac diff --git a/tests/scripts/gen_release_notes.sh b/tests/scripts/gen_release_notes.sh new file mode 100755 index 000000000..061435cae --- /dev/null +++ b/tests/scripts/gen_release_notes.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -e + +function help() { + print=" + To run this command, + 1. verify you are selecting right branch from GitHub UI dropdown menu + 2. enter the tag you want to create + " + echo "$print" + exit 1 +} + +if [ -z "${GITHUB_USER}" ] || [ -z "${GITHUB_TOKEN}" ]; then + echo "requires both GITHUB_USER and GITHUB_TOKEN to be set as env variable" + help +fi + +pr_list=$(git log --pretty="%s" --merges --left-only "${FROM_BRANCH}"..."${TO_TAG}" | grep pull | awk '/Merge pull request/ {print $4}' | cut -c 2-) + +# for releases notes +function release_notes() { + for pr in $pr_list; do + # get PR title + backport_pr=$(curl -s -u "${GITHUB_USER}":"${GITHUB_TOKEN}" "https://api.github.com/repos/rook/rook/pulls/${pr}" | jq '.title') + # with upstream/release-1.6 v1.6.8, it was giving extra PR numbers, so removing after PR for changing tag is merged. 
+ if [[ "$backport_pr" =~ ./*"build: Update build version to $TO_TAG"* ]]; then + break + fi + # check if it is manual backport PR or not, for mergify backport PR it will contain "(backport" + if [[ "$backport_pr" =~ .*"(backport".* ]]; then + # find the PR number after the # + original_pr=$(echo "$backport_pr" | sed -n -e 's/^.*#//p' | grep -E0o '[0-9]' | tr -d '\n') + else + # in manual backport PR, we'll directly fetch the owner and title from the PR number + original_pr=$pr + fi + # get the PR title and PR owner in required format + title_with_user=$(curl -s -u "${GITHUB_USER}":"${GITHUB_TOKEN}" "https://api.github.com/repos/rook/rook/pulls/${original_pr}" | jq '.title+ " (#, @"+.user.login+")"') + # add PR number after "#" + result=$(echo "$title_with_user" | sed "s/(#/(#$original_pr/" |tail -c +2) + # remove last `"` + result=${result%\"} + echo "$result" + done +} + +release_notes diff --git a/tests/scripts/generate-tls-config.sh b/tests/scripts/generate-tls-config.sh new file mode 100755 index 000000000..a711ab3f8 --- /dev/null +++ b/tests/scripts/generate-tls-config.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +set -xe + +DIR=$1 +SERVICE=$2 +NAMESPACE=$3 +IP=$4 +if [ -z "${IP}" ]; then + IP=127.0.0.1 +fi + +openssl genrsa -out "${DIR}"/"${SERVICE}".key 2048 + +cat <"${DIR}"/csr.conf +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names +[alt_names] +DNS.1 = ${SERVICE} +DNS.2 = ${SERVICE}.${NAMESPACE} +DNS.3 = ${SERVICE}.${NAMESPACE}.svc +DNS.4 = ${SERVICE}.${NAMESPACE}.svc.cluster.local +IP.1 = ${IP} +EOF + +openssl req -new -key "${DIR}"/"${SERVICE}".key -subj "/CN=system:node:${SERVICE};/O=system:nodes" -out "${DIR}"/server.csr -config "${DIR}"/csr.conf + +export CSR_NAME=${SERVICE}-csr + +# Minimum 1.19.0 kubernetes version is required for certificates.k8s.io/v1 version +SERVER_VERSION=$(kubectl version --short | awk -F "." 
'/Server Version/ {print $2}') +MINIMUM_VERSION=19 +if [ "${SERVER_VERSION}" -lt "${MINIMUM_VERSION}" ] +then + cat <<EOF >"${DIR}"/csr.yaml + apiVersion: certificates.k8s.io/v1beta1 + kind: CertificateSigningRequest + metadata: + name: ${CSR_NAME} + spec: + groups: + - system:authenticated + request: $(cat "${DIR}"/server.csr | base64 | tr -d '\n') + usages: + - digital signature + - key encipherment + - server auth +EOF +else + cat <<EOF >"${DIR}"/csr.yaml + apiVersion: certificates.k8s.io/v1 + kind: CertificateSigningRequest + metadata: + name: ${CSR_NAME} + spec: + groups: + - system:authenticated + request: $(cat "${DIR}"/server.csr | base64 | tr -d '\n') + signerName: kubernetes.io/kubelet-serving + usages: + - digital signature + - key encipherment + - server auth +EOF +fi + +kubectl create -f "${DIR}/"csr.yaml + +kubectl certificate approve "${CSR_NAME}" + +serverCert=$(kubectl get csr "${CSR_NAME}" -o jsonpath='{.status.certificate}') +echo "${serverCert}" | openssl base64 -d -A -out "${DIR}"/"${SERVICE}".crt +kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | base64 -d > "${DIR}"/"${SERVICE}".ca diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh index fe2f7dfea..2b49f46e5 100755 --- a/tests/scripts/github-action-helper.sh +++ b/tests/scripts/github-action-helper.sh @@ -121,7 +121,7 @@ function build_rook() { tests/scripts/validate_modified_files.sh build docker images if [[ "$build_type" == "build" ]]; then - docker tag $(docker images | awk '/build-/ {print $1}') rook/ceph:v1.7.2 + docker tag "$(docker images | awk '/build-/ {print $1}')" rook/ceph:v1.7.2 fi } @@ -133,7 +133,10 @@ function validate_yaml() { cd cluster/examples/kubernetes/ceph kubectl create -f crds.yaml -f common.yaml # skipping folders and some yamls that are only for openshift. - kubectl create $(ls -I scc.yaml -I "*-openshift.yaml" -I "*.sh" -I "*.py" -p | grep -v / | awk ' { print " -f " $1 } ') --dry-run + manifests="$(find . -maxdepth 1 -type f -name '*.yaml' -and -not -name '*openshift*' -and -not -name 'scc*')" + with_f_arg="$(echo "$manifests" | awk '{printf " -f %s",$1}')" # don't add newline + # shellcheck disable=SC2086 # '-f manifest1.yaml -f manifest2.yaml etc.' 
should not be quoted + kubectl create ${with_f_arg} --dry-run } function create_cluster_prerequisites() { @@ -156,9 +159,18 @@ function deploy_cluster() { } function wait_for_prepare_pod() { - timeout 180 sh -c 'until kubectl -n rook-ceph logs -f job/$(kubectl -n rook-ceph get job -l app=rook-ceph-osd-prepare -o jsonpath='{.items[0].metadata.name}'); do sleep 5; done' || true - timeout 60 sh -c 'until kubectl -n rook-ceph logs $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd,ceph_daemon_id=0 -o jsonpath='{.items[*].metadata.name}') --all-containers; do echo "waiting for osd container" && sleep 1; done' || true - kubectl -n rook-ceph describe job/$(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}') || true + timeout 180 bash <<-'EOF' + until kubectl -n rook-ceph logs --follow job/$(kubectl -n rook-ceph get job -l app=rook-ceph-osd-prepare -o jsonpath='{.items[0].metadata.name}') || true; do + sleep 5 + done +EOF + timeout 60 bash <<-'EOF' + until kubectl -n rook-ceph logs $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd,ceph_daemon_id=0 -o jsonpath='{.items[*].metadata.name}') --all-containers || true; do + echo "waiting for osd container" + sleep 1 + done +EOF + kubectl -n rook-ceph describe job/"$(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}')" || true kubectl -n rook-ceph describe deploy/rook-ceph-osd-0 || true } @@ -166,7 +178,7 @@ function wait_for_ceph_to_be_ready() { DAEMONS=$1 OSD_COUNT=$2 mkdir test - tests/scripts/validate_cluster.sh $DAEMONS $OSD_COUNT + tests/scripts/validate_cluster.sh "$DAEMONS" "$OSD_COUNT" kubectl -n rook-ceph get pods } @@ -189,73 +201,62 @@ function create_LV_on_disk() { kubectl create -f cluster/examples/kubernetes/ceph/common.yaml } -function generate_tls_config { -DIR=$1 -SERVICE=$2 -NAMESPACE=$3 -IP=$4 -if [ -z "${IP}" ]; then - IP=127.0.0.1 -fi - - openssl genrsa -out "${DIR}"/"${SERVICE}".key 2048 - - cat <<EOF >"${DIR}"/csr.conf -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[req_distinguished_name] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -extendedKeyUsage = serverAuth -subjectAltName = @alt_names -[alt_names] -DNS.1 = ${SERVICE} -DNS.2 = ${SERVICE}.${NAMESPACE} -DNS.3 = ${SERVICE}.${NAMESPACE}.svc -DNS.4 = ${SERVICE}.${NAMESPACE}.svc.cluster.local -IP.1 = ${IP} -EOF - - openssl req -new -key "${DIR}"/"${SERVICE}".key -subj "/CN=${SERVICE}.${NAMESPACE}.svc" -out "${DIR}"/server.csr -config "${DIR}"/csr.conf - - export CSR_NAME=${SERVICE}-csr - - cat <<EOF >"${DIR}"/csr.yaml -apiVersion: certificates.k8s.io/v1beta1 -kind: CertificateSigningRequest -metadata: - name: ${CSR_NAME} -spec: - groups: - - system:authenticated - request: $(cat ${DIR}/server.csr | base64 | tr -d '\n') - usages: - - digital signature - - key encipherment - - server auth -EOF +function deploy_first_rook_cluster() { + BLOCK=$(sudo lsblk|awk '/14G/ {print $1}'| head -1) + cd cluster/examples/kubernetes/ceph/ + kubectl create -f crds.yaml -f common.yaml -f operator.yaml + yq w -i -d1 cluster-test.yaml spec.dashboard.enabled false + yq w -i -d1 cluster-test.yaml spec.storage.useAllDevices false + yq w -i -d1 cluster-test.yaml spec.storage.deviceFilter "${BLOCK}"1 + kubectl create -f cluster-test.yaml -f toolbox.yaml +} - kubectl create -f "${DIR}/"csr.yaml +function wait_for_rgw_pods() { + for _ in {1..120}; do + if kubectl -n "$1" get pod -l app=rook-ceph-rgw -o 
jsonpath='{.items[0].metadata.name}'; then + echo "rgw pods found" + break + fi + echo "waiting for rgw pods" + sleep 5; + done - kubectl certificate approve ${CSR_NAME} +} - serverCert=$(kubectl get csr ${CSR_NAME} -o jsonpath='{.status.certificate}') - echo "${serverCert}" | openssl base64 -d -A -out "${DIR}"/"${SERVICE}".crt - kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | base64 -d > "${DIR}"/"${SERVICE}".ca +function deploy_second_rook_cluster() { + BLOCK=$(sudo lsblk|awk '/14G/ {print $1}'| head -1) + cd cluster/examples/kubernetes/ceph/ + NAMESPACE=rook-ceph-secondary envsubst < common-second-cluster.yaml | kubectl create -f - + sed -i 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g' cluster-test.yaml + yq w -i -d1 cluster-test.yaml spec.storage.deviceFilter "${BLOCK}"2 + yq w -i -d1 cluster-test.yaml spec.dataDirHostPath "/var/lib/rook-external" + yq w -i toolbox.yaml metadata.namespace rook-ceph-secondary + kubectl create -f cluster-test.yaml -f toolbox.yaml } -selected_function="$1" -if [ "$selected_function" = "generate_tls_config" ]; then - $selected_function $2 $3 $4 $5 -elif [ "$selected_function" = "wait_for_ceph_to_be_ready" ]; then - $selected_function $2 $3 -else - $selected_function -fi +function write_object_to_cluster1_read_from_cluster2() { + cd cluster/examples/kubernetes/ceph/ + echo "[default]" > s3cfg + echo "host_bucket = no.way.in.hell" >> ./s3cfg + echo "use_https = False" >> ./s3cfg + fallocate -l 1M ./1M.dat + echo "hello world" >> ./1M.dat + CLUSTER_1_IP_ADDR=$(kubectl -n rook-ceph get svc rook-ceph-rgw-multisite-store -o jsonpath="{.spec.clusterIP}") + BASE64_ACCESS_KEY=$(kubectl -n rook-ceph get secrets realm-a-keys -o jsonpath="{.data.access-key}") + BASE64_SECRET_KEY=$(kubectl -n rook-ceph get secrets realm-a-keys -o jsonpath="{.data.secret-key}") + ACCESS_KEY=$(echo ${BASE64_ACCESS_KEY} | base64 --decode) + SECRET_KEY=$(echo ${BASE64_SECRET_KEY} | base64 --decode) + s3cmd --config=s3cfg --access_key=${ACCESS_KEY} --secret_key=${SECRET_KEY} --host=${CLUSTER_1_IP_ADDR} mb s3://bkt + s3cmd --config=s3cfg --access_key=${ACCESS_KEY} --secret_key=${SECRET_KEY} --host=${CLUSTER_1_IP_ADDR} put ./1M.dat s3://bkt + CLUSTER_2_IP_ADDR=$(kubectl -n rook-ceph-secondary get svc rook-ceph-rgw-zone-b-multisite-store -o jsonpath="{.spec.clusterIP}") + s3cmd --config=s3cfg --access_key=${ACCESS_KEY} --secret_key=${SECRET_KEY} --host=${CLUSTER_2_IP_ADDR} get s3://bkt/1M.dat 1M-get.dat --force + diff 1M.dat 1M-get.dat +} -if [ $? -ne 0 ]; then - echo "Function call to '$selected_function' was not successful" >&2 +FUNCTION="$1" +shift # remove function arg now that we've recorded it +# call the function with the remainder of the user-provided args +if ! 
$FUNCTION "$@"; then + echo "Call to $FUNCTION was not successful" >&2 exit 1 fi diff --git a/tests/scripts/helm.sh b/tests/scripts/helm.sh index c2f85c01c..0c70a010e 100755 --- a/tests/scripts/helm.sh +++ b/tests/scripts/helm.sh @@ -1,9 +1,7 @@ #!/bin/bash +e -scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" temp="/tmp/rook-tests-scripts-helm" -HELM="${temp}/helm" helm_version="${HELM_VERSION:-"v3.6.2"}" arch="${ARCH:-}" @@ -29,16 +27,12 @@ detectArch() { install() { # Download and unpack helm - if [ -x "${TEST_HELM_PATH}" ]; then - HELM="${TEST_HELM_PATH}" - else - local dist - dist="$(uname -s)" - dist=$(echo "${dist}" | tr "[:upper:]" "[:lower:]") - mkdir -p "${temp}" - wget "https://get.helm.sh/helm-${helm_version}-${dist}-${arch}.tar.gz" -O "${temp}/helm.tar.gz" - tar -C "${temp}" -xvf "${temp}/helm.tar.gz" --strip-components 1 - fi + local dist + dist="$(uname -s)" + dist=$(echo "${dist}" | tr "[:upper:]" "[:lower:]") + mkdir -p "${temp}" + wget "https://get.helm.sh/helm-${helm_version}-${dist}-${arch}.tar.gz" -O "${temp}/helm.tar.gz" + tar -C "${temp}" -xvf "${temp}/helm.tar.gz" --strip-components 1 } if [ -z "${arch}" ]; then diff --git a/tests/scripts/k8s-vagrant-multi-node.sh b/tests/scripts/k8s-vagrant-multi-node.sh deleted file mode 100755 index e51d0f924..000000000 --- a/tests/scripts/k8s-vagrant-multi-node.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash -e - -scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -REPO_DIR="${REPO_DIR:-${scriptdir}/../../.cache/k8s-vagrant-multi-node/}" -# shellcheck disable=SC1090 -source "${scriptdir}/../../build/common.sh" - -function init() { - if [ ! -d "${REPO_DIR}" ]; then - echo "k8s-vagrant-multi-node not found in rook cache dir. Cloning.." - mkdir -p "${REPO_DIR}" - git clone https://github.com/galexrt/k8s-vagrant-multi-node.git "${REPO_DIR}" - # checkout latest tag of the repo initially after clone - git -C "${REPO_DIR}" checkout "$(git -C "${REPO_DIR}" describe --tags `git rev-list --tags --max-count=1`)" - else - git -C "${REPO_DIR}" pull || { echo "git pull failed with exit code $?. continuing as the repo is already there ..."; } - fi -} - -# Deletes pods with 'rook-' prefix. Namespace is expected as the first argument -function delete_rook_pods() { - for P in $(kubectl get pods -n "$1" | awk "/$2/ {print \$1}"); do - kubectl delete pod "$P" -n "$1" - done -} - -# current kubectl context == minikube, returns boolean -function check_context() { - if [ "$(kubectl config view 2>/dev/null | awk '/current-context/ {print $NF}')" = "minikube" ]; then - return 0 - fi - - return 1 -} - -function copy_image_to_cluster() { - local build_image=$1 - local final_image=$2 - make load-image IMG="${build_image}" TAG="${final_image}" -} - -function copy_images() { - if [[ "$1" == "" || "$1" == "ceph" ]]; then - echo "copying ceph images" - copy_image_to_cluster "${BUILD_REGISTRY}/ceph-amd64" rook/ceph:master - fi - - if [[ "$1" == "" || "$1" == "cassandra" ]]; then - echo "copying cassandra image" - copy_image_to_cluster "${BUILD_REGISTRY}/cassandra-amd64" rook/cassandra:master - fi - - if [[ "$1" == "" || "$1" == "nfs" ]]; then - echo "copying nfs image" - copy_image_to_cluster "${BUILD_REGISTRY}/nfs-amd64" rook/nfs:master - fi -} - -init - -cd "${REPO_DIR}" || { echo "failed to access k8s-vagrant-multi-node dir ${REPO_DIR}. 
exiting."; exit 1; } - -case "${1:-}" in - status) - make status - ;; - up) - make up - copy_images "${2}" - ;; - update) - copy_images "${2}" - ;; - restart) - if check_context; then - [ "$2" ] && regex=$2 || regex="^rook-" - echo "Restarting Rook pods matching the regex \"$regex\" in the following namespaces.." - for ns in $(kubectl get ns -o name | grep '^rook-'); do - echo "-> $ns" - delete_rook_pods "$ns" $regex - done - else - echo "To prevent accidental data loss acting only on 'minikube' context. No action is taken." - fi - ;; - helm) - echo " copying rook image for helm" - helm_tag="$(cat _output/version)" - copy_image_to_cluster "${BUILD_REGISTRY}/ceph-amd64" "rook/ceph:${helm_tag}" - ;; - clean) - make clean - ;; - shell) - "${SHELL}" - ;; - *) - echo "usage:" >&2 - echo " $0 status" >&2 - echo " $0 up [ceph | cassandra | nfs]" >&2 - echo " $0 update" >&2 - echo " $0 restart" >&2 - echo " $0 helm" >&2 - echo " $0 clean" >&2 - echo " $0 shell - Open a '${SHELL}' shell in the cloned k8s-vagrant-multi-node project." >&2 - echo " $0 help" >&2 - ;; -esac diff --git a/tests/scripts/kubeadm-dind.sh b/tests/scripts/kubeadm-dind.sh deleted file mode 100755 index 259dfd55e..000000000 --- a/tests/scripts/kubeadm-dind.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash +e - -scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "${scriptdir}/../../build/common.sh" - -tarname=image.tar -tarfile=${WORK_DIR}/tests/${tarname} - -copy_image_to_cluster() { - local final_image=$1 - - mkdir -p ${WORK_DIR}/tests - docker save -o ${tarfile} ${final_image} - for c in kube-master kube-node-1 kube-node-2; do - docker cp ${tarfile} ${c}:/ - docker exec ${c} /bin/bash -c "docker load -i /${tarname}" - done -} - -copy_rbd() { - for c in kube-master kube-node-1 kube-node-2; do - docker cp ${scriptdir}/dind-cluster-rbd ${c}:/bin/rbd - docker exec ${c} /bin/bash -c "chmod +x /bin/rbd" - # hack for Azure, after vm is started first docker pull command fails intermittently - local maxRetry=3 - local cur=1 - while [ $cur -le $maxRetry ]; do - docker exec ${c} /bin/bash -c "docker pull ceph/base" - if [ $? 
-eq 0 ]; then - break - fi - sleep 1 - ((++cur)) - done - done -} - -# configure dind-cluster -export EMBEDDED_CONFIG=1 -export KUBECTL_DIR=${KUBEADM_DIND_DIR} -export DIND_SUBNET=10.192.0.0 -export APISERVER_PORT=${APISERVER_PORT:-8080} -export NUM_NODES=${NUM_NODES:-2} -export KUBE_VERSION=${KUBE_VERSION:-"v1.6"} -export DIND_IMAGE="${DIND_IMAGE:-mirantis/kubeadm-dind-cluster:${KUBE_VERSION}}" -export CNI_PLUGIN="${CNI_PLUGIN:-bridge}" - -case "${1:-}" in - up) - ${scriptdir}/dind-cluster.sh reup - ${scriptdir}/makeTestImages.sh save amd64 || true - copy_image_to_cluster rook/ceph:master - set +e - copy_image_to_cluster ceph/base ceph/base:latest - set -e - copy_rbd - ;; - down) - ${scriptdir}/dind-cluster.sh down - ;; - clean) - ${scriptdir}/dind-cluster.sh clean - ;; - update) - copy_image_to_cluster rook/ceph:master - ;; - wordpress) - copy_image_to_cluster mysql:5.6 - copy_image_to_cluster wordpress:4.6.1-apache - ;; - *) - echo "usage:" >&2 - echo " $0 up" >&2 - echo " $0 down" >&2 - echo " $0 clean" >&2 - echo " $0 update" >&2 - echo " $0 wordpress" >&2 -esac diff --git a/tests/scripts/kubeadm-install.sh b/tests/scripts/kubeadm-install.sh index 629b480ef..aa9b73eca 100755 --- a/tests/scripts/kubeadm-install.sh +++ b/tests/scripts/kubeadm-install.sh @@ -1,7 +1,6 @@ #!/bin/bash +e KUBE_VERSION=${1:-"v1.15.12"} -ARCH=$(dpkg --print-architecture) null_str= KUBE_INSTALL_VERSION="${KUBE_VERSION/v/$null_str}"-00 @@ -17,7 +16,7 @@ wait_for_dpkg_unlock() { retryInterval=10 until [ ${retry} -ge ${maxRetries} ] do - if [[ `sudo lsof /var/lib/dpkg/lock|wc -l` -le 0 ]]; then + if [[ $(sudo lsof /var/lib/dpkg/lock|wc -l) -le 0 ]]; then break fi ((++retry)) @@ -25,7 +24,7 @@ wait_for_dpkg_unlock() { sleep ${retryInterval} done - if [ ${retry} -ge ${maxRetries} ]; then + if [ "${retry}" -ge ${maxRetries} ]; then echo "Failed after ${maxRetries} attempts! 
- cannot install kubeadm" exit 1 fi @@ -39,9 +38,7 @@ wait_for_dpkg_unlock sudo apt-get install -y apt-transport-https sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -sudo cat <<EOF >/etc/apt/sources.list.d/kubernetes.list -deb http://apt.kubernetes.io/ kubernetes-xenial main -EOF +echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list sudo apt-get update wait_for_dpkg_unlock diff --git a/tests/scripts/kubeadm.sh b/tests/scripts/kubeadm.sh index 4203f150a..6e6d4693e 100755 --- a/tests/scripts/kubeadm.sh +++ b/tests/scripts/kubeadm.sh @@ -3,7 +3,7 @@ scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" tarname=image.tar -tarfile="${WORK_DIR}/tests/${tarname}" +export tarfile="${WORK_DIR}/tests/${tarname}" export KUBE_VERSION=${KUBE_VERSION:-"v1.15.12"} @@ -38,10 +38,10 @@ usage(){ #install k8s master node install_master(){ - sudo kubeadm init $skippreflightcheck --kubernetes-version ${KUBE_VERSION} + sudo kubeadm init $skippreflightcheck --kubernetes-version "${KUBE_VERSION}" - sudo cp /etc/kubernetes/admin.conf $HOME/ - sudo chown $(id -u):$(id -g) $HOME/admin.conf + sudo cp /etc/kubernetes/admin.conf "$HOME"/ + sudo chown "$(id -u)":"$(id -g)" "$HOME"/admin.conf export KUBECONFIG=$HOME/admin.conf kubectl taint nodes --all node-role.kubernetes.io/master- @@ -50,7 +50,7 @@ install_master(){ echo "wait for K8s master node to be Ready" INC=0 while [[ $INC -lt 20 ]]; do - kube_ready=$(kubectl get node -o jsonpath='{.items['$count'].status.conditions[?(@.reason == "KubeletReady")].status}') + kube_ready=$(kubectl get node -o jsonpath='{.items['"$count"'].status.conditions[?(@.reason == "KubeletReady")].status}') if [ "${kube_ready}" == "True" ]; then break fi @@ -71,7 +71,7 @@ install_master(){ install_node(){ echo "inside install node function" echo "kubeadm join ${1} ${2} ${3} ${4} ${5} $skippreflightcheck" - sudo kubeadm join ${1} ${2} ${3} ${4} ${5} $skippreflightcheck || true + sudo kubeadm join "${1}" "${2}" "${3}" "${4}" "${5}" $skippreflightcheck || true } #wait for all nodes in the cluster to be ready status @@ -79,8 +79,8 @@ wait_for_ready(){ #expect 3 node cluster by default local numberOfNode=${1:-3} local count=0 - sudo cp /etc/kubernetes/admin.conf $HOME/ - sudo chown $(id -u):$(id -g) $HOME/admin.conf + sudo cp /etc/kubernetes/admin.conf "$HOME"/ + sudo chown "$(id -u)":"$(id -g)" "$HOME"/admin.conf export KUBECONFIG=$HOME/admin.conf until [[ $count -eq $numberOfNode ]]; do @@ -113,8 +113,8 @@ wait_for_ready(){ kubeadm_reset() { kubectl delete -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" sudo kubeadm reset --force $skippreflightcheck - rm $HOME/admin.conf - rm -rf $HOME/.kube + rm "$HOME"/admin.conf + rm -rf "$HOME"/.kube sudo apt-get -y remove kubelet sudo apt-get -y remove kubeadm sudo swapon -a @@ -125,7 +125,7 @@ case "${1:-}" in up) sudo sh -c "${scriptdir}/kubeadm-install.sh ${KUBE_VERSION}" install_master - ${scriptdir}/makeTestImages.sh tag ${arch} || true + "${scriptdir}"/makeTestImages.sh tag ${arch} || true ;; clean) kubeadm_reset @@ -143,7 +143,7 @@ case "${1:-}" in node) if [ "$#" -eq 5 ] || [ "$#" -eq 7 ]; then - install_node $3 $4 $5 $6 $7 + install_node "$3" "$4" "$5" "$6" "$7" else echo "invalid arguments for install node" usage @@ -159,7 +159,7 @@ case "${1:-}" in wait) if [ "$#" -eq 2 ]; then - wait_for_ready $2 + wait_for_ready "$2" else echo "invalid number of arguments for wait" usage diff --git 
a/tests/scripts/localPathPV.sh b/tests/scripts/localPathPV.sh index d370eb8a9..8af20dad0 100755 --- a/tests/scripts/localPathPV.sh +++ b/tests/scripts/localPathPV.sh @@ -37,7 +37,7 @@ sudo test ! -b "${test_scratch_device}" && echo "invalid scratch device, not a b function prepare_node() { sudo rm -rf /var/lib/rook/rook-integration-test sudo mkdir -p /var/lib/rook/rook-integration-test/mon1 /var/lib/rook/rook-integration-test/mon2 /var/lib/rook/rook-integration-test/mon3 - node_name=$(kubectl get nodes -o jsonpath={.items[*].metadata.name}) + node_name=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}') kubectl label nodes "${node_name}" rook.io/has-disk=true kubectl delete pv -l type=local } @@ -124,14 +124,14 @@ eof function create_osd_pvc() { local osd_count=$1 local storage=6Gi - + for osd in $(seq 1 "$osd_count"); do path=${test_scratch_device}${osd} if [ "$osd_count" -eq 1 ]; then path=$test_scratch_device storage=10Gi fi - + cat <&2 @@ -36,7 +36,7 @@ case "${1:-}" in load) case "${2:-}" in arm|arm64|amd64) - echo "Loading archived images to docker: $(ls | grep tar)" + echo "Loading archived images to docker: $(ls ./*tar*)" docker load -i "ceph-$2.tar" docker load -i "cassandra-$2.tar" diff --git a/tests/scripts/minikube.sh b/tests/scripts/minikube.sh index cb4723585..361c61843 100755 --- a/tests/scripts/minikube.sh +++ b/tests/scripts/minikube.sh @@ -1,7 +1,7 @@ #!/bin/bash -e scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -# shellcheck disable=SC1090 +# source=build/common.sh source "${scriptdir}/../../build/common.sh" function wait_for_ssh() { @@ -22,7 +22,7 @@ function copy_image_to_cluster() { local final_image=$2 local docker_env_tag="${DOCKERCMD}-env" ${DOCKERCMD} save "${build_image}" | \ - (eval "$(minikube ${docker_env_tag} --shell bash)" && \ + (eval "$(minikube "${docker_env_tag}" --shell bash)" && \ ${DOCKERCMD} load && \ ${DOCKERCMD} tag "${build_image}" "${final_image}") } diff --git a/tests/scripts/minishift.sh b/tests/scripts/minishift.sh index ff3a4cb44..9db28c61b 100755 --- a/tests/scripts/minishift.sh +++ b/tests/scripts/minishift.sh @@ -1,15 +1,16 @@ #!/bin/bash -e scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +# source=build/common.sh source "${scriptdir}/../../build/common.sh" function wait_for_ssh() { local tries=100 - while (( ${tries} > 0 )) ; do - if `minishift ssh echo connected &> /dev/null` ; then + while (( tries > 0 )) ; do + if minishift ssh echo connected &> /dev/null ; then return 0 fi - tries=$(( ${tries} - 1 )) + (( tries-- )) sleep 0.1 done echo ERROR: ssh did not come up >&2 @@ -19,7 +20,7 @@ function wait_for_ssh() { function copy_image_to_cluster() { local build_image=$1 local final_image=$2 - docker save ${build_image} | (eval $(minishift docker-env --shell bash) && docker load && docker tag ${build_image} ${final_image}) + docker save "${build_image}" | (eval "$(minishift docker-env --shell bash)" && docker load && docker tag "${build_image}" "${final_image}") } # current kubectl context == minishift, returns boolean @@ -37,12 +38,12 @@ MEMORY=${MEMORY:-"3000"} case "${1:-}" in up) echo "starting minishift" - minishift start --memory=${MEMORY} --vm-driver=virtualbox --iso-url centos + minishift start --memory="${MEMORY}" --vm-driver=virtualbox --iso-url centos wait_for_ssh # create a link so the default dataDirHostPath will work for this environment #minishift ssh "sudo mkdir /mnt/sda1/var/lib/rook;sudo ln -s /mnt/sda1/var/lib/rook /var/lib/rook" - copy_image_to_cluster ${BUILD_REGISTRY}/ceph-amd64 
rook/ceph:master + copy_image_to_cluster "${BUILD_REGISTRY}"/ceph-amd64 rook/ceph:master ;; down) minishift delete -f @@ -53,7 +54,7 @@ case "${1:-}" in ;; update) echo "updating the rook images" - copy_image_to_cluster ${BUILD_REGISTRY}/ceph-amd64 rook/ceph:master + copy_image_to_cluster "${BUILD_REGISTRY}"/ceph-amd64 rook/ceph:master ;; restart) if check_context; then @@ -73,8 +74,8 @@ case "${1:-}" in ;; helm) echo " copying rook image for helm" - helm_tag="`cat _output/version`" - copy_image_to_cluster ${BUILD_REGISTRY}/rook-amd64 rook/rook:${helm_tag} + helm_tag="$(cat _output/version)" + copy_image_to_cluster "${BUILD_REGISTRY}"/rook-amd64 rook/rook:"${helm_tag}" ;; clean) minishift delete -f diff --git a/tests/scripts/webhook-patch-ca-bundle.sh b/tests/scripts/webhook-patch-ca-bundle.sh index a41388ec1..4618410e4 100755 --- a/tests/scripts/webhook-patch-ca-bundle.sh +++ b/tests/scripts/webhook-patch-ca-bundle.sh @@ -5,7 +5,8 @@ set -o errexit set -o nounset set -o pipefail -export CA_BUNDLE=$(kubectl get configmap -n kube-system extension-apiserver-authentication -o=jsonpath='{.data.client-ca-file}' | base64 | tr -d '\n') +CA_BUNDLE=$(kubectl get configmap -n kube-system extension-apiserver-authentication -o=jsonpath='{.data.client-ca-file}' | base64 | tr -d '\n') +export CA_BUNDLE if command -v envsubst >/dev/null 2>&1; then envsubst